xref: /qemu/hw/display/apple-gfx.m (revision 513823e7521a09ed7ad1e32e6454bac3b2cbf52d)
1/*
2 * QEMU Apple ParavirtualizedGraphics.framework device
3 *
4 * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 *
8 * ParavirtualizedGraphics.framework is a set of libraries that macOS provides
9 * which implements 3d graphics passthrough to the host as well as a
10 * proprietary guest communication channel to drive it. This device model
11 * implements support to drive that library from within QEMU.
12 */
13
14#include "qemu/osdep.h"
15#include "qemu/lockable.h"
16#include "qemu/cutils.h"
17#include "qemu/log.h"
18#include "qapi/visitor.h"
19#include "qapi/error.h"
20#include "block/aio-wait.h"
21#include "exec/address-spaces.h"
22#include "system/dma.h"
23#include "migration/blocker.h"
24#include "ui/console.h"
25#include "apple-gfx.h"
26#include "trace.h"
27
28#include <mach/mach.h>
29#include <mach/mach_vm.h>
30#include <dispatch/dispatch.h>
31
32#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h>
33
/* Modes advertised to the guest when no display-modes property is given. */
static const AppleGFXDisplayMode apple_gfx_default_modes[] = {
    { 1920, 1080, 60 },
    { 1440, 1080, 60 },
    { 1280, 1024, 60 },
};

/* Shared migration blocker, installed once by the first device realized. */
static Error *apple_gfx_mig_blocker;
/*
 * Serial number for the next PGDisplay created; the macOS guest ignores
 * displays whose serial numbers collide, so each instance gets a fresh one.
 */
static uint32_t next_pgdisplay_serial_num = 1;
42
/*
 * Returns the process-wide default-priority concurrent dispatch queue,
 * used for PVG-related work that must not run under the BQL.
 */
static dispatch_queue_t get_background_queue(void)
{
    dispatch_queue_t bg_queue;

    bg_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    return bg_queue;
}
47
48/* ------ PGTask and task operations: new/destroy/map/unmap ------ */
49
50/*
51 * This implements the type declared in <ParavirtualizedGraphics/PGDevice.h>
52 * which is opaque from the framework's point of view. It is used in callbacks
53 * in the form of its typedef PGTask_t, which also already exists in the
54 * framework headers.
55 *
56 * A "task" in PVG terminology represents a host-virtual contiguous address
57 * range which is reserved in a large chunk on task creation. The mapMemory
58 * callback then requests ranges of guest system memory (identified by their
59 * GPA) to be mapped into subranges of this reserved address space.
60 * This type of operation isn't well-supported by QEMU's memory subsystem,
61 * but it is fortunately trivial to achieve with Darwin's mach_vm_remap() call,
62 * which allows us to refer to the same backing memory via multiple virtual
63 * address ranges. The Mach VM APIs are therefore used throughout for managing
64 * task memory.
65 */
struct PGTask_s {
    /* Entry in AppleGFXState::tasks; protected by AppleGFXState's task_mutex. */
    QTAILQ_ENTRY(PGTask_s) node;
    /* Owning device state. */
    AppleGFXState *s;
    /* Base of the reserved host-virtual address range for this task. */
    mach_vm_address_t address;
    /* Size in bytes of the reserved range starting at @address. */
    uint64_t len;
    /*
     * All unique MemoryRegions for which a mapping has been created in this
     * task, and on which we have thus called memory_region_ref(). There are
     * usually very few regions of system RAM in total, so we expect this array
     * to be very short. Therefore, no need for sorting or fancy search
     * algorithms, linear search will do.
     * Protected by AppleGFXState's task_mutex.
     */
    GPtrArray *mapped_regions;
};
81
/*
 * Reserves a @len byte host-virtual address range for a new PVG task and
 * links the task into the device's task list. Returns NULL if the Mach VM
 * reservation fails; the caller owns nothing in that case.
 */
static PGTask_t *apple_gfx_new_task(AppleGFXState *s, uint64_t len)
{
    mach_vm_address_t reserved = 0;
    PGTask_t *new_task;
    kern_return_t kr;

    kr = mach_vm_allocate(mach_task_self(), &reserved, len, VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return NULL;
    }

    new_task = g_new0(PGTask_t, 1);
    new_task->s = s;
    new_task->address = reserved;
    new_task->len = len;
    new_task->mapped_regions = g_ptr_array_sized_new(2 /* Usually enough */);

    WITH_QEMU_LOCK_GUARD(&s->task_mutex) {
        QTAILQ_INSERT_TAIL(&s->tasks, new_task, node);
    }

    return new_task;
}
104
/*
 * Tears down a PVG task: drops the reference taken on every MemoryRegion
 * mapped into it, releases the reserved host-virtual range, unlinks the
 * task from the device's list and frees it.
 */
static void apple_gfx_destroy_task(AppleGFXState *s, PGTask_t *task)
{
    GPtrArray *regions = task->mapped_regions;
    size_t idx;

    /* Undo the memory_region_ref() taken when each region was first mapped. */
    for (idx = 0; idx < regions->len; ++idx) {
        memory_region_unref(g_ptr_array_index(regions, idx));
    }
    g_ptr_array_unref(regions);

    mach_vm_deallocate(mach_task_self(), task->address, task->len);

    WITH_QEMU_LOCK_GUARD(&s->task_mutex) {
        QTAILQ_REMOVE(&s->tasks, task, node);
    }
    g_free(task);
}
123
/*
 * Resolves a guest-physical range to a host pointer, provided the whole
 * range lies within a single directly-accessible RAM region. On success,
 * returns the host pointer and stores the backing MemoryRegion in
 * *mapping_in_region (unmodified on failure); returns NULL otherwise.
 */
void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical,
                                       uint64_t length, bool read_only,
                                       MemoryRegion **mapping_in_region)
{
    hwaddr offset_in_region = 0;
    hwaddr resolved_length = length;
    MemoryRegion *region;
    char *base;

    region = address_space_translate(&address_space_memory, guest_physical,
                                     &offset_in_region, &resolved_length,
                                     !read_only, MEMTXATTRS_UNSPECIFIED);

    /* Reject ranges that are truncated or not plain, directly-mapped RAM. */
    if (!region || resolved_length < length ||
        !memory_access_is_direct(region, !read_only)) {
        return NULL;
    }

    base = memory_region_get_ram_ptr(region);
    if (!base) {
        return NULL;
    }

    *mapping_in_region = region;
    return base + offset_in_region;
}
152
/*
 * mapMemory callback implementation: maps @range_count guest-physical
 * ranges, consecutively, into the task's reserved host-virtual range
 * starting at @virtual_offset, by remapping the host pages that back the
 * guest RAM. Ranges that cannot be resolved to direct RAM are skipped
 * (their slot in the task range is left as-is) and false is returned at
 * the end; otherwise returns true.
 */
static bool apple_gfx_task_map_memory(AppleGFXState *s, PGTask_t *task,
                                      uint64_t virtual_offset,
                                      PGPhysicalMemoryRange_t *ranges,
                                      uint32_t range_count, bool read_only)
{
    kern_return_t r;
    void *source_ptr;
    mach_vm_address_t target;
    vm_prot_t cur_protection, max_protection;
    bool success = true;
    MemoryRegion *region;

    /*
     * RCU read lock keeps the address-space translation results stable;
     * task_mutex protects task->mapped_regions. Both held for the whole
     * batch of ranges.
     */
    RCU_READ_LOCK_GUARD();
    QEMU_LOCK_GUARD(&s->task_mutex);

    trace_apple_gfx_map_memory(task, range_count, virtual_offset, read_only);
    for (int i = 0; i < range_count; i++) {
        PGPhysicalMemoryRange_t *range = &ranges[i];

        /* Ranges are laid out back-to-back in the task's virtual space. */
        target = task->address + virtual_offset;
        virtual_offset += range->physicalLength;

        trace_apple_gfx_map_memory_range(i, range->physicalAddress,
                                         range->physicalLength);

        region = NULL;
        source_ptr = apple_gfx_host_ptr_for_gpa_range(range->physicalAddress,
                                                      range->physicalLength,
                                                      read_only, &region);
        if (!source_ptr) {
            success = false;
            continue;
        }

        /* Take one long-lived reference per unique region (linear search). */
        if (!g_ptr_array_find(task->mapped_regions, region, NULL)) {
            g_ptr_array_add(task->mapped_regions, region);
            memory_region_ref(region);
        }

        cur_protection = 0;
        max_protection = 0;
        /* Map guest RAM at range->physicalAddress into PG task memory range */
        r = mach_vm_remap(mach_task_self(),
                          &target, range->physicalLength, vm_page_size - 1,
                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                          mach_task_self(), (mach_vm_address_t)source_ptr,
                          false /* shared mapping, no copy */,
                          &cur_protection, &max_protection,
                          VM_INHERIT_COPY);
        trace_apple_gfx_remap(r, source_ptr, target);
        g_assert(r == KERN_SUCCESS);
    }

    return success;
}
208
/*
 * unmapMemory callback implementation: removes a previously established
 * guest RAM mapping from the task's virtual range by overwriting the
 * subrange with freshly allocated zero-fill pages.
 */
static void apple_gfx_task_unmap_memory(AppleGFXState *s, PGTask_t *task,
                                        uint64_t virtual_offset, uint64_t length)
{
    mach_vm_address_t start = task->address + virtual_offset;
    kern_return_t kr;

    trace_apple_gfx_unmap_memory(task, virtual_offset, length);

    /*
     * Replacing the range with fresh 0 pages severs the remapping into
     * guest RAM established by the mapMemory callback.
     */
    kr = mach_vm_allocate(mach_task_self(), &start, length,
                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
    g_assert(kr == KERN_SUCCESS);
}
226
227/* ------ Rendering and frame management ------ */
228
229static void apple_gfx_render_frame_completed_bh(void *opaque);
230
/*
 * Kicks off asynchronous rendering of one guest frame into s->texture.
 *
 * Called with the BQL held. The PVG encode call must not run under the
 * BQL (PVG-internal locks can deadlock against it), so the encode is
 * dispatched to a background queue. The texture and command buffer are
 * retained here and released on every exit path of the async block. On
 * successful encode, a Metal completion handler schedules
 * apple_gfx_render_frame_completed_bh() on the main AIO context.
 */
static void apple_gfx_render_new_frame(AppleGFXState *s)
{
    bool managed_texture = s->using_managed_texture_storage;
    uint32_t width = surface_width(s->surface);
    uint32_t height = surface_height(s->surface);
    MTLRegion region = MTLRegionMake2D(0, 0, width, height);
    id<MTLCommandBuffer> command_buffer = [s->mtl_queue commandBuffer];
    id<MTLTexture> texture = s->texture;

    assert(bql_locked());
    /* Keep both objects alive for the duration of the async encode. */
    [texture retain];
    [command_buffer retain];

    /* Record the mode being rendered; checked again on frame completion. */
    s->rendering_frame_width = width;
    s->rendering_frame_height = height;

    dispatch_async(get_background_queue(), ^{
        /*
         * This is not safe to call from the BQL/BH due to PVG-internal locks
         * causing deadlocks.
         */
        bool r = [s->pgdisp encodeCurrentFrameToCommandBuffer:command_buffer
                                                 texture:texture
                                                  region:region];
        if (!r) {
            [texture release];
            [command_buffer release];
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: encodeCurrentFrameToCommandBuffer:texture:region: "
                          "failed\n", __func__);
            bql_lock();
            /* Drop this frame, then start the next one if any are queued. */
            --s->pending_frames;
            if (s->pending_frames > 0) {
                apple_gfx_render_new_frame(s);
            }
            bql_unlock();
            return;
        }

        if (managed_texture) {
            /* "Managed" textures exist in both VRAM and RAM and must be synced. */
            id<MTLBlitCommandEncoder> blit = [command_buffer blitCommandEncoder];
            [blit synchronizeResource:texture];
            [blit endEncoding];
        }
        [texture release];
        [command_buffer addCompletedHandler:
            ^(id<MTLCommandBuffer> cb)
            {
                aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                        apple_gfx_render_frame_completed_bh, s);
            }];
        [command_buffer commit];
        [command_buffer release];
    });
}
287
/*
 * Copies the rendered texture's pixel data into the display surface's
 * backing memory (@vram). Assumes a 4-byte-per-pixel format — the texture
 * is created as MTLPixelFormatBGRA8Unorm in set_mode().
 */
static void copy_mtl_texture_to_surface_mem(id<MTLTexture> texture, void *vram)
{
    /*
     * TODO: Skip this entirely on a pure Metal or headless/guest-only
     * rendering path, else use a blit command encoder? Needs careful
     * (double?) buffering design.
     */
    size_t w = texture.width;
    size_t h = texture.height;
    size_t stride = w * 4; /* 4 bytes per BGRA8 pixel */

    [texture getBytes:vram
          bytesPerRow:stride
        bytesPerImage:(stride * h)
           fromRegion:MTLRegionMake2D(0, 0, w, h)
          mipmapLevel:0
                slice:0];
}
304
/*
 * BH run on the main AIO context once the GPU has finished a frame.
 * Copies the texture into the QEMU display surface (unless a mode change
 * invalidated the frame), completes a deferred gfx_update request if one
 * is pending, and starts rendering the next frame if queued.
 */
static void apple_gfx_render_frame_completed_bh(void *opaque)
{
    AppleGFXState *s = opaque;

    @autoreleasepool {
        --s->pending_frames;
        assert(s->pending_frames >= 0);

        /* Only update display if mode hasn't changed since we started rendering. */
        if (s->rendering_frame_width == surface_width(s->surface) &&
            s->rendering_frame_height == surface_height(s->surface)) {
            copy_mtl_texture_to_surface_mem(s->texture, surface_data(s->surface));
            if (s->gfx_update_requested) {
                /* An update was deferred in apple_gfx_fb_update_display(). */
                s->gfx_update_requested = false;
                dpy_gfx_update_full(s->con);
                graphic_hw_update_done(s->con);
                s->new_frame_ready = false;
            } else {
                /* No update pending; hold the frame for the next update call. */
                s->new_frame_ready = true;
            }
        }
        if (s->pending_frames > 0) {
            apple_gfx_render_new_frame(s);
        }
    }
}
331
/*
 * GraphicHwOps gfx_update callback (async variant): either publishes an
 * already-rendered frame, defers completion until an in-flight frame
 * finishes, or reports completion immediately when nothing is pending.
 */
static void apple_gfx_fb_update_display(void *opaque)
{
    AppleGFXState *s = opaque;

    assert(bql_locked());

    if (s->new_frame_ready) {
        /* A finished frame is waiting: publish it and signal completion. */
        dpy_gfx_update_full(s->con);
        s->new_frame_ready = false;
        graphic_hw_update_done(s->con);
        return;
    }

    if (s->pending_frames > 0) {
        /* A frame is in flight; the completion BH will finish this update. */
        s->gfx_update_requested = true;
        return;
    }

    /* Nothing rendered and nothing in flight: complete right away. */
    graphic_hw_update_done(s->con);
}
347
/*
 * Console callbacks. gfx_update_async means apple_gfx_fb_update_display()
 * signals completion via graphic_hw_update_done() rather than on return.
 */
static const GraphicHwOps apple_gfx_fb_ops = {
    .gfx_update = apple_gfx_fb_update_display,
    .gfx_update_async = true,
};
352
353/* ------ Mouse cursor and display mode setting ------ */
354
/*
 * Switches the console to a new resolution: creates a fresh display
 * surface and a matching BGRA8 Metal texture for PVG to render into.
 * No-op when the size is unchanged. Called under the BQL (from the
 * modeChangeHandler via BQL_LOCK_GUARD).
 */
static void set_mode(AppleGFXState *s, uint32_t width, uint32_t height)
{
    MTLTextureDescriptor *textureDescriptor;

    if (s->surface &&
        width == surface_width(s->surface) &&
        height == surface_height(s->surface)) {
        return;
    }

    /* Drop the old texture; a correctly sized one is created below. */
    [s->texture release];

    s->surface = qemu_create_displaysurface(width, height);

    @autoreleasepool {
        textureDescriptor =
            [MTLTextureDescriptor
                texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm
                                             width:width
                                            height:height
                                         mipmapped:NO];
        /* Request only the usage modes the PVG display requires. */
        textureDescriptor.usage = s->pgdisp.minimumTextureUsage;
        s->texture = [s->mtl newTextureWithDescriptor:textureDescriptor];
        /* Managed-storage textures need explicit GPU->CPU synchronisation. */
        s->using_managed_texture_storage =
            (s->texture.storageMode == MTLStorageModeManaged);
    }

    /* The console takes over the new surface (and disposes of the old one). */
    dpy_gfx_replace_surface(s->con, s->surface);
}
384
/*
 * Pushes the PVG display's current cursor position and visibility state
 * to the QEMU console. Must be called with the BQL held.
 */
static void update_cursor(AppleGFXState *s)
{
    PGDisplayCoord_t pos;

    assert(bql_locked());
    pos = s->pgdisp.cursorPosition;
    dpy_mouse_set(s->con, pos.x, pos.y, qatomic_read(&s->cursor_show));
}
391
/* BH wrapper so update_cursor() can be scheduled from PVG callbacks. */
static void update_cursor_bh(void *opaque)
{
    update_cursor((AppleGFXState *)opaque);
}
397
/* Deferred-work payload for set_cursor_glyph(), scheduled as a BH. */
typedef struct AppleGFXSetCursorGlyphJob {
    AppleGFXState *s;
    NSBitmapImageRep *glyph; /* retained by the scheduler, released by the BH */
    PGDisplayCoord_t hotspot;
} AppleGFXSetCursorGlyphJob;
403
/*
 * BH: converts the NSBitmapImageRep cursor glyph supplied by PVG into a
 * QEMUCursor and publishes it on the console. Consumes both the job
 * struct (g_free'd here) and the glyph reference retained by the
 * scheduling handler (released here). Only 32 bpp glyphs are converted;
 * for anything else the cursor is merely cleared.
 */
static void set_cursor_glyph(void *opaque)
{
    AppleGFXSetCursorGlyphJob *job = opaque;
    AppleGFXState *s = job->s;
    NSBitmapImageRep *glyph = job->glyph;
    uint32_t bpp = glyph.bitsPerPixel;
    size_t width = glyph.pixelsWide;
    size_t height = glyph.pixelsHigh;
    /* Rows may be padded beyond width * 4 bytes; skipped after each row. */
    size_t padding_bytes_per_row = glyph.bytesPerRow - width * 4;
    const uint8_t* px_data = glyph.bitmapData;

    trace_apple_gfx_cursor_set(bpp, width, height);

    if (s->cursor) {
        cursor_unref(s->cursor);
        s->cursor = NULL;
    }

    if (bpp == 32) { /* Shouldn't be anything else, but just to be safe... */
        s->cursor = cursor_alloc(width, height);
        s->cursor->hot_x = job->hotspot.x;
        s->cursor->hot_y = job->hotspot.y;

        uint32_t *dest_px = s->cursor->data;

        for (size_t y = 0; y < height; ++y) {
            for (size_t x = 0; x < width; ++x) {
                /*
                 * NSBitmapImageRep's red & blue channels are swapped
                 * compared to QEMUCursor's.
                 */
                *dest_px =
                    (px_data[0] << 16u) |
                    (px_data[1] <<  8u) |
                    (px_data[2] <<  0u) |
                    (px_data[3] << 24u);
                ++dest_px;
                px_data += 4;
            }
            px_data += padding_bytes_per_row;
        }
        dpy_cursor_define(s->con, s->cursor);
        update_cursor(s);
    }
    [glyph release];

    g_free(job);
}
452
453/* ------ DMA (device reading system memory) ------ */
454
/*
 * Parameters and result of a guest-memory read that is executed in a BH
 * on the main AIO context (DMA requires the BQL) while the requesting
 * thread waits on @sem.
 */
typedef struct AppleGFXReadMemoryJob {
    QemuSemaphore sem;       /* posted when the BH has finished */
    hwaddr physical_address; /* guest-physical source address */
    uint64_t length;         /* number of bytes to read */
    void *dst;               /* destination buffer in host memory */
    bool success;            /* true iff dma_memory_read() returned MEMTX_OK */
} AppleGFXReadMemoryJob;
462
/*
 * BH half of apple_gfx_read_memory(): performs the DMA read under the BQL
 * and wakes the waiting thread.
 */
static void apple_gfx_do_read_memory(void *opaque)
{
    AppleGFXReadMemoryJob *job = opaque;

    job->success =
        (dma_memory_read(&address_space_memory, job->physical_address,
                         job->dst, job->length,
                         MEMTXATTRS_UNSPECIFIED) == MEMTX_OK);

    qemu_sem_post(&job->sem);
}
474
/*
 * Synchronously reads @length bytes of guest memory at @physical_address
 * into @dst on behalf of the PVG framework's readMemory callback.
 * Returns true on success.
 */
static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address,
                                  uint64_t length, void *dst)
{
    AppleGFXReadMemoryJob job = {
        .physical_address = physical_address,
        .length = length,
        .dst = dst,
    };
    bool ok;

    trace_apple_gfx_read_memory(physical_address, length, dst);

    /* Performing DMA requires BQL, so do it in a BH. */
    qemu_sem_init(&job.sem, 0);
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            apple_gfx_do_read_memory, &job);
    qemu_sem_wait(&job.sem);
    ok = job.success;
    qemu_sem_destroy(&job.sem);
    return ok;
}
492
493/* ------ Memory-mapped device I/O operations ------ */
494
/*
 * Parameters and result of a single MMIO access forwarded to the PGDevice
 * on a background dispatch queue while the vCPU thread waits in
 * AIO_WAIT_WHILE() on @completed.
 */
typedef struct AppleGFXIOJob {
    AppleGFXState *state;
    uint64_t offset; /* MMIO offset of the access */
    uint64_t value;  /* value written, or value read back */
    bool completed;  /* set (atomically) by the worker when done */
} AppleGFXIOJob;
501
/* Background-queue half of apple_gfx_read(): performs the device access. */
static void apple_gfx_do_read(void *opaque)
{
    AppleGFXIOJob *job = opaque;

    job->value = [job->state->pgdev mmioReadAtOffset:job->offset];
    qatomic_set(&job->completed, true);
    aio_wait_kick(); /* wake the AIO_WAIT_WHILE() loop in apple_gfx_read() */
}
509
/*
 * MMIO read handler: forwards the access to the PGDevice on a background
 * dispatch queue and services AIO work on this thread until it completes.
 * See apple_gfx_write() for why the access cannot be made directly.
 */
static uint64_t apple_gfx_read(void *opaque, hwaddr offset, unsigned size)
{
    AppleGFXIOJob job = {
        .state = opaque,
        .offset = offset,
        .completed = false,
    };

    dispatch_async_f(get_background_queue(), &job, apple_gfx_do_read);
    AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));

    trace_apple_gfx_read(offset, job.value);
    return job.value;
}
525
/* Background-queue half of apple_gfx_write(): performs the device access. */
static void apple_gfx_do_write(void *opaque)
{
    AppleGFXIOJob *job = opaque;

    [job->state->pgdev mmioWriteAtOffset:job->offset value:job->value];
    qatomic_set(&job->completed, true);
    aio_wait_kick(); /* wake the AIO_WAIT_WHILE() loop in apple_gfx_write() */
}
533
/*
 * MMIO write handler: forwards the access to the PGDevice on a background
 * dispatch queue and services AIO work on this thread until it completes.
 */
static void apple_gfx_write(void *opaque, hwaddr offset, uint64_t val,
                            unsigned size)
{
    /*
     * mmioReadAtOffset: and especially mmioWriteAtOffset: can trigger
     * synchronous operations on other dispatch queues, which in turn may
     * call back out on one or more of the callback blocks. Because we hold
     * the BQL here, the I/O method therefore runs on a pool thread while
     * this thread handles AIO tasks. Any callback work that needs the BQL
     * is scheduled as BHs, which this thread processes while waiting.
     */
    AppleGFXIOJob job = {
        .state = opaque,
        .offset = offset,
        .value = val,
        .completed = false,
    };

    dispatch_async_f(get_background_queue(), &job, apple_gfx_do_write);
    AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));

    trace_apple_gfx_write(offset, val);
}
559
/*
 * MMIO region ops for the PVG device. Guest accesses of 4 or 8 bytes are
 * valid, but the implementation handles 4-byte units only, so the memory
 * core splits 8-byte accesses accordingly.
 */
static const MemoryRegionOps apple_gfx_ops = {
    .read = apple_gfx_read,
    .write = apple_gfx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
573
/*
 * Queries the framework's default MMIO window size by instantiating a
 * throwaway PGDeviceDescriptor and reading its mmioLength property.
 */
static size_t apple_gfx_get_default_mmio_range_size(void)
{
    size_t len;

    @autoreleasepool {
        PGDeviceDescriptor *probe = [PGDeviceDescriptor new];
        len = probe.mmioLength;
        [probe release];
    }
    return len;
}
584
585/* ------ Initialisation and startup ------ */
586
/*
 * Common early initialisation shared by the apple-gfx device variants:
 * sets up the MMIO region with the framework's default window size.
 */
void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name)
{
    size_t mmio_len = apple_gfx_get_default_mmio_range_size();

    trace_apple_gfx_common_init(obj_name, mmio_len);
    memory_region_init_io(&s->iomem_gfx, obj, &apple_gfx_ops, s, obj_name,
                          mmio_len);

    /* TODO: PVG framework supports serialising device state: integrate it! */
}
597
/*
 * Wires up the PGDeviceDescriptor callback blocks for task lifecycle and
 * guest-memory access. The framework invokes these from its own dispatch
 * queues (NOTE(review): presumably without the BQL — the readMemory path
 * explicitly bounces DMA into a BQL-holding BH); they delegate to the
 * apple_gfx_* task/memory helpers above.
 */
static void apple_gfx_register_task_mapping_handlers(AppleGFXState *s,
                                                     PGDeviceDescriptor *desc)
{
    /* Reserve a host-virtual range for a new task; report its base address. */
    desc.createTask = ^(uint64_t vmSize, void * _Nullable * _Nonnull baseAddress) {
        PGTask_t *task = apple_gfx_new_task(s, vmSize);
        *baseAddress = (void *)task->address;
        trace_apple_gfx_create_task(vmSize, *baseAddress);
        return task;
    };

    desc.destroyTask = ^(PGTask_t * _Nonnull task) {
        trace_apple_gfx_destroy_task(task, task->mapped_regions->len);

        apple_gfx_destroy_task(s, task);
    };

    /* Map guest-physical ranges into the task's reserved virtual range. */
    desc.mapMemory = ^bool(PGTask_t * _Nonnull task, uint32_t range_count,
                           uint64_t virtual_offset, bool read_only,
                           PGPhysicalMemoryRange_t * _Nonnull ranges) {
        return apple_gfx_task_map_memory(s, task, virtual_offset,
                                         ranges, range_count, read_only);
    };

    desc.unmapMemory = ^bool(PGTask_t * _Nonnull task, uint64_t virtual_offset,
                             uint64_t length) {
        apple_gfx_task_unmap_memory(s, task, virtual_offset, length);
        return true;
    };

    /* Synchronous guest-memory read (DMA is done in a BQL-holding BH). */
    desc.readMemory = ^bool(uint64_t physical_address, uint64_t length,
                            void * _Nonnull dst) {
        return apple_gfx_read_memory(s, physical_address, length, dst);
    };
}
632
/*
 * BH scheduled by the PVG newFrameEventHandler: accounts for a freshly
 * available guest frame and starts rendering it unless one is already in
 * flight (in which case the completion BH will pick it up).
 */
static void new_frame_handler_bh(void *opaque)
{
    AppleGFXState *s = opaque;

    /* Drop frames if guest gets too far ahead. */
    if (s->pending_frames >= 2) {
        return;
    }

    /*
     * Count the frame. If another one was already pending, rendering is in
     * progress; apple_gfx_render_frame_completed_bh() will start this one.
     */
    if (++s->pending_frames != 1) {
        return;
    }

    @autoreleasepool {
        apple_gfx_render_new_frame(s);
    }
}
650
/*
 * Builds the PGDisplayDescriptor for the single virtual display, wiring
 * each display event handler to the QEMU side. Handlers run on the main
 * dispatch queue and punt BQL-requiring work to BHs on the main AIO
 * context. The caller owns (and must release) the returned descriptor.
 */
static PGDisplayDescriptor *apple_gfx_prepare_display_descriptor(AppleGFXState *s)
{
    PGDisplayDescriptor *disp_desc = [PGDisplayDescriptor new];

    disp_desc.name = @"QEMU display";
    disp_desc.sizeInMillimeters = NSMakeSize(400., 300.); /* A 20" display */
    disp_desc.queue = dispatch_get_main_queue();
    /* Guest has produced a new frame: schedule the frame-accounting BH. */
    disp_desc.newFrameEventHandler = ^(void) {
        trace_apple_gfx_new_frame();
        aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh, s);
    };
    /* Guest changed resolution: rebuild surface and texture under the BQL. */
    disp_desc.modeChangeHandler = ^(PGDisplayCoord_t sizeInPixels,
                                    OSType pixelFormat) {
        trace_apple_gfx_mode_change(sizeInPixels.x, sizeInPixels.y);

        BQL_LOCK_GUARD();
        set_mode(s, sizeInPixels.x, sizeInPixels.y);
    };
    /* New cursor image: retain the glyph and convert it in a BH. */
    disp_desc.cursorGlyphHandler = ^(NSBitmapImageRep *glyph,
                                     PGDisplayCoord_t hotspot) {
        AppleGFXSetCursorGlyphJob *job = g_malloc0(sizeof(*job));
        job->s = s;
        job->glyph = glyph;
        job->hotspot = hotspot;
        [glyph retain]; /* released by set_cursor_glyph() */
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                set_cursor_glyph, job);
    };
    disp_desc.cursorShowHandler = ^(BOOL show) {
        trace_apple_gfx_cursor_show(show);
        qatomic_set(&s->cursor_show, show);
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                update_cursor_bh, s);
    };
    disp_desc.cursorMoveHandler = ^(void) {
        trace_apple_gfx_cursor_move();
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                update_cursor_bh, s);
    };

    return disp_desc;
}
693
/*
 * Converts an array of AppleGFXDisplayMode entries into the
 * NSArray<PGDisplayMode *> form the framework expects. The caller owns
 * (and must release) the returned array.
 */
static NSArray<PGDisplayMode *> *apple_gfx_create_display_mode_array(
    const AppleGFXDisplayMode display_modes[], uint32_t display_mode_count)
{
    NSMutableArray<PGDisplayMode *> *modes =
        [[NSMutableArray alloc] initWithCapacity:display_mode_count];

    for (uint32_t i = 0; i < display_mode_count; i++) {
        const AppleGFXDisplayMode *m = &display_modes[i];
        PGDisplayCoord_t size_px = { m->width_px, m->height_px };
        PGDisplayMode *obj;

        trace_apple_gfx_display_mode(i, m->width_px, m->height_px);

        obj = [[PGDisplayMode alloc] initWithSizeInPixels:size_px
                                          refreshRateInHz:m->refresh_rate_hz];
        [modes addObject:obj]; /* the array retains obj */
        [obj release];
    }

    return modes;
}
715
/*
 * Picks the Metal device to render with and returns it with a +1 retain
 * count (hence "copy" in the name). Preference order: first GPU with
 * unified memory, else the last non-removable GPU seen, else the system
 * default device.
 */
static id<MTLDevice> copy_suitable_metal_device(void)
{
    NSArray<id<MTLDevice>> *all_devices = MTLCopyAllDevices();
    id<MTLDevice> chosen = nil;

    /* Prefer a unified memory GPU. Failing that, pick a non-removable GPU. */
    for (id<MTLDevice> candidate in all_devices) {
        if (candidate.hasUnifiedMemory) {
            chosen = candidate;
            break;
        }
        if (!candidate.removable) {
            chosen = candidate;
        }
    }

    if (chosen != nil) {
        /* Device is owned by the array; take our own reference. */
        [chosen retain];
    } else {
        /* MTLCreateSystemDefaultDevice() already returns a +1 reference. */
        chosen = MTLCreateSystemDefaultDevice();
    }
    [all_devices release];

    return chosen;
}
741
/*
 * Common realize path for the apple-gfx device variants: installs the
 * (process-wide, one-time) migration blocker, picks a Metal device,
 * registers the task/memory callbacks on @desc, creates the PGDevice and
 * its single PGDisplay, applies the display mode list and hooks up the
 * QEMU console. Returns false (with errp set) only if the migration
 * blocker cannot be added.
 */
bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev,
                              PGDeviceDescriptor *desc, Error **errp)
{
    PGDisplayDescriptor *disp_desc;
    const AppleGFXDisplayMode *display_modes = apple_gfx_default_modes;
    uint32_t num_display_modes = ARRAY_SIZE(apple_gfx_default_modes);
    NSArray<PGDisplayMode *> *mode_array;

    /* Device state is not migratable (yet); block migration once globally. */
    if (apple_gfx_mig_blocker == NULL) {
        error_setg(&apple_gfx_mig_blocker,
                  "Migration state blocked by apple-gfx display device");
        if (migrate_add_blocker(&apple_gfx_mig_blocker, errp) < 0) {
            return false;
        }
    }

    qemu_mutex_init(&s->task_mutex);
    QTAILQ_INIT(&s->tasks);
    s->mtl = copy_suitable_metal_device();
    s->mtl_queue = [s->mtl newCommandQueue];

    desc.device = s->mtl;

    apple_gfx_register_task_mapping_handlers(s, desc);

    s->cursor_show = true;

    s->pgdev = PGNewDeviceWithDescriptor(desc);

    disp_desc = apple_gfx_prepare_display_descriptor(s);
    /*
     * Although the framework does, this integration currently does not support
     * multiple virtual displays connected to a single PV graphics device.
     * It is however possible to create
     * more than one instance of the device, each with one display. The macOS
     * guest will ignore these displays if they share the same serial number,
     * so ensure each instance gets a unique one.
     */
    s->pgdisp = [s->pgdev newDisplayWithDescriptor:disp_desc
                                              port:0
                                         serialNum:next_pgdisplay_serial_num++];
    [disp_desc release];

    /* A user-supplied display-modes property overrides the built-in list. */
    if (s->display_modes != NULL && s->num_display_modes > 0) {
        trace_apple_gfx_common_realize_modes_property(s->num_display_modes);
        display_modes = s->display_modes;
        num_display_modes = s->num_display_modes;
    }
    /* The pgdisp property retains the array; drop our creation reference. */
    s->pgdisp.modeList = mode_array =
        apple_gfx_create_display_mode_array(display_modes, num_display_modes);
    [mode_array release];

    s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s);
    return true;
}
797
798/* ------ Display mode list device property ------ */
799
/*
 * qdev property getter: formats the mode as "<width>x<height>@<rate>"
 * and emits it through the string visitor.
 */
static void apple_gfx_get_display_mode(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    Property *prop = opaque;
    AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop);
    /* 3 uint16s (max 5 digits each) + 2 separator characters + nul. */
    char buffer[5 * 3 + 2 + 1];
    char *pos = buffer;
    int len;

    len = snprintf(buffer, sizeof(buffer), "%"PRIu16"x%"PRIu16"@%"PRIu16,
                   mode->width_px, mode->height_px, mode->refresh_rate_hz);
    assert(len < sizeof(buffer));

    visit_type_str(v, name, &pos, errp);
}
818
/*
 * qdev property setter: parses a "<width>x<height>@<refresh-rate>" string
 * (e.g. "1920x1080@60") into an AppleGFXDisplayMode. Each component must
 * be a decimal integer in 1..65535.
 *
 * Fixes over the previous version: trailing characters after the refresh
 * rate (e.g. "1920x1080@60Hz") are now rejected instead of silently
 * ignored, and the mode struct is only modified once the whole string has
 * parsed successfully, so a failed set cannot leave it half-updated.
 */
static void apple_gfx_set_display_mode(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    Property *prop = opaque;
    AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop);
    const char *endptr;
    g_autofree char *str = NULL;
    int ret;
    int width, height, refresh_rate;

    if (!visit_type_str(v, name, &str, errp)) {
        return;
    }

    endptr = str;

    ret = qemu_strtoi(endptr, &endptr, 10, &width);
    if (ret || width > UINT16_MAX || width <= 0) {
        error_setg(errp, "width in '%s' must be a decimal integer number"
                         " of pixels in the range 1..65535", name);
        return;
    }
    if (*endptr != 'x') {
        goto separator_error;
    }

    ret = qemu_strtoi(endptr + 1, &endptr, 10, &height);
    if (ret || height > UINT16_MAX || height <= 0) {
        error_setg(errp, "height in '%s' must be a decimal integer number"
                         " of pixels in the range 1..65535", name);
        return;
    }
    if (*endptr != '@') {
        goto separator_error;
    }

    ret = qemu_strtoi(endptr + 1, &endptr, 10, &refresh_rate);
    if (ret || refresh_rate > UINT16_MAX || refresh_rate <= 0) {
        error_setg(errp, "refresh rate in '%s'"
                         " must be a positive decimal integer (Hertz)", name);
        return;
    }
    /* Reject trailing garbage, e.g. "1920x1080@60Hz". */
    if (*endptr != '\0') {
        error_setg(errp, "unexpected trailing characters '%s' in '%s'",
                   endptr, name);
        return;
    }

    /* Commit only now that the entire string has been validated. */
    mode->width_px = width;
    mode->height_px = height;
    mode->refresh_rate_hz = refresh_rate;
    return;

separator_error:
    error_setg(errp,
               "Each display mode takes the format '<width>x<height>@<rate>'");
}
871
/*
 * qdev property type backing each element of the display-modes list
 * property; see apple_gfx_get/set_display_mode for the string format.
 */
const PropertyInfo qdev_prop_apple_gfx_display_mode = {
    .name  = "display_mode",
    .description =
        "Display mode in pixels and Hertz, as <width>x<height>@<refresh-rate> "
        "Example: 3840x2160@60",
    .get   = apple_gfx_get_display_mode,
    .set   = apple_gfx_set_display_mode,
};
880