Excerpted lines from QEMU's VFIO region handling code, grouped by function:

File header and includes:

 * the COPYING file in the top-level directory.
 * Based on qemu-kvm device-assignment:
 * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)

#include "hw/vfio/vfio-region.h"
#include "hw/vfio/vfio-device.h"
#include "qemu/error-report.h"
#include "vfio-helpers.h"

/* IO Port/MMIO - Beware of the endians, VFIO is always little endian */

In vfio_region_write():

    VFIODevice *vbasedev = region->vbasedev;

    ret = vbasedev->io_ops->region_write(vbasedev, region->nr,
                                         addr, size, &buf, region->post_wr);

        ... __func__, vbasedev->name, region->nr,

    trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);

    vbasedev->ops->vfio_eoi(vbasedev);

In vfio_region_read():

    VFIODevice *vbasedev = region->vbasedev;

    ret = vbasedev->io_ops->region_read(vbasedev, region->nr, addr, size, &buf);

        ... __func__, vbasedev->name, region->nr,
        return (uint64_t)-1;

    trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);

    vbasedev->ops->vfio_eoi(vbasedev);

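The "beware of the endians" note above is the key constraint on these accessors: VFIO always presents region contents in little-endian byte order, so the guest value has to be converted to little endian before it is handed to the write backend, and converted back after a read. Below is a minimal, self-contained sketch of that packing step, assuming a plain pwrite() backend; the le_region_write() helper and its signature are illustrative, not QEMU's actual io_ops hook.

#include <endian.h>      /* htole16/htole32/htole64 (glibc/musl) */
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

/* Illustrative only: pack 'data' into a little-endian buffer of 'size'
 * bytes and write it at 'offset' within the region backed by 'fd'. */
static ssize_t le_region_write(int fd, off_t offset,
                               uint64_t data, unsigned size)
{
    union {
        uint8_t  b;
        uint16_t w;
        uint32_t l;
        uint64_t q;
    } buf;

    switch (size) {
    case 1: buf.b = (uint8_t)data;             break;
    case 2: buf.w = htole16((uint16_t)data);   break;
    case 4: buf.l = htole32((uint32_t)data);   break;
    case 8: buf.q = htole64(data);             break;
    default: return -1;
    }

    return pwrite(fd, &buf, size, offset);
}

On a little-endian host the htole*() conversions compile to nothing, so the common case costs no extra work; only big-endian hosts actually swap bytes.
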
In vfio_setup_region_sparse_mmaps():

        return -ENODEV;

    trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                         region->nr, sparse->nr_areas);

    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);

    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
        if (sparse->areas[i].size) {
            trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
                                                sparse->areas[i].offset +
                                                sparse->areas[i].size - 1);
            region->mmaps[j].offset = sparse->areas[i].offset;
            region->mmaps[j].size = sparse->areas[i].size;
            j++;
        }
    }

    region->nr_mmaps = j;
    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));

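The loop above keeps only the sparse-mmap areas that the kernel reported with a non-zero size and compacts them, so region->nr_mmaps ends up counting usable windows only, and the array is then shrunk to that count. The same idea in standalone form, using plain calloc()/realloc() and simplified stand-in structs rather than the linux/vfio.h capability layout or QEMU's g_new0()/g_realloc():

#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's sparse-mmap area description and
 * for the per-region mmap bookkeeping; not the real UAPI/QEMU types. */
struct area   { uint64_t offset, size; };
struct window { uint64_t offset, size; };

/* Copy only areas with a non-zero size into a newly allocated, compacted
 * array.  Returns the number of windows kept, or -1 on allocation failure. */
static int compact_sparse_areas(const struct area *areas, unsigned nr_areas,
                                struct window **out)
{
    struct window *wins = calloc(nr_areas, sizeof(*wins));
    unsigned i, j = 0;

    if (nr_areas && !wins) {
        return -1;
    }

    for (i = 0; i < nr_areas; i++) {
        if (areas[i].size) {
            wins[j].offset = areas[i].offset;
            wins[j].size = areas[i].size;
            j++;
        }
    }

    /* Shrink to the number of windows actually kept (j may be zero). */
    *out = realloc(wins, j * sizeof(*wins));
    if (j && !*out) {
        *out = wins;    /* shrinking realloc failed: keep the original buffer */
    }
    return (int)j;
}
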
In vfio_region_setup():

    region->vbasedev = vbasedev;
    region->flags = info->flags;
    region->size = info->size;
    region->fd_offset = info->offset;
    region->nr = index;
    region->post_wr = false;

    if (region->size) {
        region->mem = g_new0(MemoryRegion, 1);
        memory_region_init_io(region->mem, obj, &vfio_region_ops,
                              region, name, region->size);

        if (!vbasedev->no_mmap &&
            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {

            region->nr_mmaps = 1;
            region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
            region->mmaps[0].offset = 0;
            region->mmaps[0].size = region->size;

    trace_vfio_region_setup(vbasedev->name, index, name,
                            region->flags, region->fd_offset, region->size);

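vfio_region_setup() records the flags, size and file offset that the kernel reported for the region. For context, here is a minimal sketch of how that information is queried from an already-open VFIO device fd using the VFIO_DEVICE_GET_REGION_INFO ioctl from the standard linux/vfio.h UAPI; the get_region_info() helper name and the way device_fd gets opened are assumptions of the example, not QEMU code.

#include <linux/vfio.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Query basic info (size, file offset, flags) for region 'index' of an
 * already-open VFIO device fd.  Returns 0 on success, -1 on error. */
static int get_region_info(int device_fd, unsigned index,
                           struct vfio_region_info *info)
{
    memset(info, 0, sizeof(*info));
    info->argsz = sizeof(*info);
    info->index = index;

    if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info) < 0) {
        return -1;
    }

    printf("region %u: size=0x%llx fd_offset=0x%llx%s%s%s\n",
           index,
           (unsigned long long)info->size,
           (unsigned long long)info->offset,
           info->flags & VFIO_REGION_INFO_FLAG_READ  ? " READ"  : "",
           info->flags & VFIO_REGION_INFO_FLAG_WRITE ? " WRITE" : "",
           info->flags & VFIO_REGION_INFO_FLAG_MMAP  ? " MMAP"  : "");
    return 0;
}

These are exactly the fields the code above copies into region->flags, region->size and region->fd_offset.
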
In vfio_subregion_unmap():

    trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
                            region->mmaps[index].offset,
                            region->mmaps[index].offset +
                            region->mmaps[index].size - 1);
    memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
    munmap(region->mmaps[index].mmap, region->mmaps[index].size);
    object_unparent(OBJECT(&region->mmaps[index].mem));
    region->mmaps[index].mmap = NULL;

In vfio_region_mmap():

    if (!region->mem) {

    prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
    prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;

    for (i = 0; i < region->nr_mmaps; i++) {
        size_t align = MIN(1ULL << ctz64(region->mmaps[i].size), 1 * GiB);

        /* ... on x86_64).  Align by power-of-two size, capped at 1GiB. */

        map_base = mmap(0, region->mmaps[i].size + align, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map_base == MAP_FAILED) {
            ret = -errno;

        /* Use the per-region fd if set, or the shared fd. */
        fd = region->vbasedev->region_fds ?
             region->vbasedev->region_fds[region->nr] :
             region->vbasedev->fd;

        munmap(map_base, map_align - map_base);
        munmap(map_align + region->mmaps[i].size,
               align - (map_align - map_base));

        region->mmaps[i].mmap = mmap(map_align, region->mmaps[i].size, prot,
                                     MAP_SHARED | MAP_FIXED, fd,
                                     region->fd_offset +
                                     region->mmaps[i].offset);
        if (region->mmaps[i].mmap == MAP_FAILED) {
            ret = -errno;

        ... memory_region_name(region->mem), i);
        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
                                          memory_region_owner(region->mem),
                                          name, region->mmaps[i].size,
                                          region->mmaps[i].mmap);

        memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                    &region->mmaps[i].mem);

        trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
                               region->mmaps[i].offset,
                               region->mmaps[i].offset +
                               region->mmaps[i].size - 1);

    trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
                                 region->fd_offset + region->mmaps[i].offset,
                                 region->fd_offset + region->mmaps[i].offset +
                                 region->mmaps[i].size - 1, ret);

    region->mmaps[i].mmap = NULL;

    for (i--; i >= 0; i--) {

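The mapping path above is a reserve/trim/fixed-map sequence: reserve an oversized PROT_NONE anonymous mapping, round the returned address up to the chosen power-of-two alignment, munmap() the slack at both ends, and finally map the real region over the aligned hole with MAP_FIXED. Below is a self-contained sketch of that technique, with anonymous memory standing in for the VFIO region fd and ROUND_UP defined locally rather than taken from QEMU.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

/* Return a 'size'-byte mapping whose address is 'align'-aligned (align must
 * be a power of two) without committing memory for the padding: reserve
 * size + align bytes with PROT_NONE, trim the slack, then place the payload
 * into the aligned hole with MAP_FIXED. */
static void *mmap_aligned(size_t size, size_t align)
{
    uint8_t *base, *aligned;
    void *map;

    base = mmap(NULL, size + align, PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        return NULL;
    }

    aligned = (uint8_t *)ROUND_UP((uintptr_t)base, align);

    /* Give back the slack before and after the aligned window. */
    if (aligned > base) {
        munmap(base, aligned - base);
    }
    munmap(aligned + size, align - (aligned - base));

    /* QEMU maps the device/region fd at its file offset here; anonymous
     * shared memory is used as a stand-in for this sketch. */
    map = mmap(aligned, size, PROT_READ | PROT_WRITE,
               MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
    return map == MAP_FAILED ? NULL : map;
}

int main(void)
{
    void *p = mmap_aligned(1 << 20, 1 << 21);   /* 1 MiB, 2 MiB-aligned */
    printf("%p\n", p);
    return p ? 0 : 1;
}

MAP_FIXED is safe here because the target range is still covered by the PROT_NONE reservation, so nothing else can have been mapped into it in the meantime.
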
In vfio_region_unmap():

    if (!region->mem) {

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {

In vfio_region_exit():

    if (!region->mem) {

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_del_subregion(region->mem, &region->mmaps[i].mem);

    trace_vfio_region_exit(region->vbasedev->name, region->nr);

In vfio_region_finalize():

    if (!region->mem) {

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            munmap(region->mmaps[i].mmap, region->mmaps[i].size);
            object_unparent(OBJECT(&region->mmaps[i].mem));

    object_unparent(OBJECT(region->mem));

    g_free(region->mem);
    g_free(region->mmaps);

    trace_vfio_region_finalize(region->vbasedev->name, region->nr);

    region->mem = NULL;
    region->mmaps = NULL;
    region->nr_mmaps = 0;
    region->size = 0;
    region->flags = 0;
    region->nr = 0;

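vfio_region_finalize() ends by clearing the bookkeeping fields, so the "if (!region->mem)" early-outs in the functions above see a consistently torn-down state and the region can be set up again later. A generic sketch of that idempotent-teardown pattern, assuming illustrative vregion/win types rather than QEMU's VFIORegion/VFIOMmap:

#include <stddef.h>
#include <stdlib.h>
#include <sys/mman.h>

struct win     { void *map; size_t size; };
struct vregion { struct win *wins; unsigned nr_wins; };   /* stand-in type */

/* Release every mapping and reset the fields, so a second call is a no-op
 * and the structure can be populated again from scratch. */
static void vregion_finalize(struct vregion *r)
{
    unsigned i;

    if (!r->wins) {              /* already finalized, or never set up */
        return;
    }
    for (i = 0; i < r->nr_wins; i++) {
        if (r->wins[i].map) {
            munmap(r->wins[i].map, r->wins[i].size);
        }
    }
    free(r->wins);

    r->wins = NULL;
    r->nr_wins = 0;
}
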
In vfio_region_mmaps_set_enabled():

    if (!region->mem) {

    for (i = 0; i < region->nr_mmaps; i++) {
        if (region->mmaps[i].mmap) {
            memory_region_set_enabled(&region->mmaps[i].mem, enabled);

    trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),