xref: /qemu/hw/display/apple-gfx.m (revision 96215036f47403438c7c7869b7cd419bd7a11f82)
1/*
2 * QEMU Apple ParavirtualizedGraphics.framework device
3 *
4 * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 *
8 * ParavirtualizedGraphics.framework is a set of libraries that macOS provides
9 * which implements 3d graphics passthrough to the host as well as a
10 * proprietary guest communication channel to drive it. This device model
11 * implements support to drive that library from within QEMU.
12 */
13
14#include "qemu/osdep.h"
15#include "qemu/lockable.h"
16#include "qemu/cutils.h"
17#include "qemu/log.h"
18#include "qapi/visitor.h"
19#include "qapi/error.h"
20#include "block/aio-wait.h"
21#include "system/address-spaces.h"
22#include "system/dma.h"
23#include "migration/blocker.h"
24#include "ui/console.h"
25#include "apple-gfx.h"
26#include "trace.h"
27
28#include <mach/mach.h>
29#include <mach/mach_vm.h>
30#include <dispatch/dispatch.h>
31
32#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h>
33
/*
 * Modes advertised to the guest when the user does not supply any via the
 * device's display-modes property (see apple_gfx_common_realize()).
 * Fields: { width_px, height_px, refresh_rate_hz }.
 */
static const AppleGFXDisplayMode apple_gfx_default_modes[] = {
    { 1920, 1080, 60 },
    { 1440, 1080, 60 },
    { 1280, 1024, 60 },
};
39
/* Created on first realize; migration stays blocked while any apple-gfx device exists. */
static Error *apple_gfx_mig_blocker;
/*
 * Serial number for the next PGDisplay created. The macOS guest ignores
 * displays that share a serial number, so each instance must be unique.
 */
static uint32_t next_pgdisplay_serial_num = 1;
42
/* Returns the process-wide default-priority global dispatch queue. */
static dispatch_queue_t get_background_queue(void)
{
    dispatch_queue_t queue;

    queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    return queue;
}
47
48/* ------ PGTask and task operations: new/destroy/map/unmap ------ */
49
50/*
51 * This implements the type declared in <ParavirtualizedGraphics/PGDevice.h>
52 * which is opaque from the framework's point of view. It is used in callbacks
53 * in the form of its typedef PGTask_t, which also already exists in the
54 * framework headers.
55 *
56 * A "task" in PVG terminology represents a host-virtual contiguous address
57 * range which is reserved in a large chunk on task creation. The mapMemory
58 * callback then requests ranges of guest system memory (identified by their
59 * GPA) to be mapped into subranges of this reserved address space.
60 * This type of operation isn't well-supported by QEMU's memory subsystem,
61 * but it is fortunately trivial to achieve with Darwin's mach_vm_remap() call,
62 * which allows us to refer to the same backing memory via multiple virtual
63 * address ranges. The Mach VM APIs are therefore used throughout for managing
64 * task memory.
65 */
struct PGTask_s {
    QTAILQ_ENTRY(PGTask_s) node;    /* Entry in AppleGFXState's tasks list */
    AppleGFXState *s;               /* Owning device state */
    mach_vm_address_t address;      /* Base of the reserved host VA range */
    uint64_t len;                   /* Size of the reserved range, in bytes */
    /*
     * All unique MemoryRegions for which a mapping has been created in this
     * task, and on which we have thus called memory_region_ref(). There are
     * usually very few regions of system RAM in total, so we expect this array
     * to be very short. Therefore, no need for sorting or fancy search
     * algorithms, linear search will do.
     * Protected by AppleGFXState's task_mutex.
     */
    GPtrArray *mapped_regions;
};
81
/*
 * Allocates a new PVG task: reserves a len-byte contiguous host virtual
 * address range and registers the task with the device. Returns NULL if the
 * address space reservation fails; otherwise the caller owns the task until
 * apple_gfx_destroy_task().
 */
static PGTask_t *apple_gfx_new_task(AppleGFXState *s, uint64_t len)
{
    mach_vm_address_t vm_base = 0;
    kern_return_t kr;
    PGTask_t *task;

    /* Reserve the whole range up front; mapMemory fills it in later. */
    kr = mach_vm_allocate(mach_task_self(), &vm_base, len, VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return NULL;
    }

    task = g_new0(PGTask_t, 1);
    task->s = s;
    task->address = vm_base;
    task->len = len;
    task->mapped_regions = g_ptr_array_sized_new(2 /* Usually enough */);

    QEMU_LOCK_GUARD(&s->task_mutex);
    QTAILQ_INSERT_TAIL(&s->tasks, task, node);

    return task;
}
104
/*
 * Tears down a PVG task: drops the references taken on all MemoryRegions
 * mapped into it, releases its reserved address range, and unlinks and
 * frees the task structure.
 */
static void apple_gfx_destroy_task(AppleGFXState *s, PGTask_t *task)
{
    GPtrArray *regions = task->mapped_regions;
    size_t idx;

    /* Undo the memory_region_ref() taken per region in mapMemory. */
    for (idx = 0; idx < regions->len; idx++) {
        memory_region_unref(g_ptr_array_index(regions, idx));
    }
    g_ptr_array_unref(regions);

    mach_vm_deallocate(mach_task_self(), task->address, task->len);

    QEMU_LOCK_GUARD(&s->task_mutex);
    QTAILQ_REMOVE(&s->tasks, task, node);
    g_free(task);
}
123
/*
 * Translates a guest-physical range into a host pointer, provided the whole
 * range lies within a single directly-accessible (RAM-backed) MemoryRegion.
 * On success, returns the host pointer and stores the containing region in
 * *mapping_in_region (caller decides whether to take a reference); returns
 * NULL if the range is not plain, contiguous RAM.
 */
void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical,
                                       uint64_t length, bool read_only,
                                       MemoryRegion **mapping_in_region)
{
    hwaddr offset_in_region = 0;
    hwaddr translated_length = length;
    MemoryRegion *region;
    char *ram_ptr;

    region = address_space_translate(&address_space_memory, guest_physical,
                                     &offset_in_region, &translated_length,
                                     !read_only, MEMTXATTRS_UNSPECIFIED);

    /* Must be one region covering the full length, with direct RAM access. */
    if (!region || translated_length < length ||
        !memory_access_is_direct(region, !read_only, MEMTXATTRS_UNSPECIFIED)) {
        return NULL;
    }

    ram_ptr = memory_region_get_ram_ptr(region);
    if (!ram_ptr) {
        return NULL;
    }

    *mapping_in_region = region;
    return ram_ptr + offset_in_region;
}
153
/*
 * mapMemory callback implementation: maps the given guest-physical ranges
 * into the task's reserved virtual address range, starting at
 * virtual_offset and packed back-to-back. Returns true only if every range
 * was mapped successfully.
 *
 * The RCU read lock keeps the memory map — and thus the MemoryRegions
 * returned by the translation in apple_gfx_host_ptr_for_gpa_range() —
 * stable while we take extra references; task_mutex protects
 * task->mapped_regions.
 */
static bool apple_gfx_task_map_memory(AppleGFXState *s, PGTask_t *task,
                                      uint64_t virtual_offset,
                                      PGPhysicalMemoryRange_t *ranges,
                                      uint32_t range_count, bool read_only)
{
    kern_return_t r;
    void *source_ptr;
    mach_vm_address_t target;
    vm_prot_t cur_protection, max_protection;
    bool success = true;
    MemoryRegion *region;

    RCU_READ_LOCK_GUARD();
    QEMU_LOCK_GUARD(&s->task_mutex);

    trace_apple_gfx_map_memory(task, range_count, virtual_offset, read_only);
    for (int i = 0; i < range_count; i++) {
        PGPhysicalMemoryRange_t *range = &ranges[i];

        /* Ranges are laid out consecutively in task-virtual space. */
        target = task->address + virtual_offset;
        virtual_offset += range->physicalLength;

        trace_apple_gfx_map_memory_range(i, range->physicalAddress,
                                         range->physicalLength);

        region = NULL;
        source_ptr = apple_gfx_host_ptr_for_gpa_range(range->physicalAddress,
                                                      range->physicalLength,
                                                      read_only, &region);
        if (!source_ptr) {
            /* Not directly-accessible RAM: report failure, but map the rest. */
            success = false;
            continue;
        }

        /* Ref each distinct region once per task; dropped on task destroy. */
        if (!g_ptr_array_find(task->mapped_regions, region, NULL)) {
            g_ptr_array_add(task->mapped_regions, region);
            memory_region_ref(region);
        }

        cur_protection = 0;
        max_protection = 0;
        /* Map guest RAM at range->physicalAddress into PG task memory range */
        r = mach_vm_remap(mach_task_self(),
                          &target, range->physicalLength, vm_page_size - 1,
                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                          mach_task_self(), (mach_vm_address_t)source_ptr,
                          false /* shared mapping, no copy */,
                          &cur_protection, &max_protection,
                          VM_INHERIT_COPY);
        trace_apple_gfx_remap(r, source_ptr, target);
        g_assert(r == KERN_SUCCESS);
    }

    return success;
}
209
/*
 * unmapMemory callback implementation: detaches a subrange of the task from
 * guest RAM by overwriting it with fresh zero-fill pages.
 */
static void apple_gfx_task_unmap_memory(AppleGFXState *s, PGTask_t *task,
                                        uint64_t virtual_offset, uint64_t length)
{
    mach_vm_address_t target = task->address + virtual_offset;
    kern_return_t kr;

    trace_apple_gfx_unmap_memory(task, virtual_offset, length);

    /*
     * Allocating anonymous zero pages over the range severs the remapping
     * to guest RAM established by apple_gfx_task_map_memory().
     */
    kr = mach_vm_allocate(mach_task_self(), &target, length,
                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
    g_assert(kr == KERN_SUCCESS);
}
227
228/* ------ Rendering and frame management ------ */
229
230static void apple_gfx_render_frame_completed_bh(void *opaque);
231
/*
 * Starts rendering the current guest frame into s->texture. Called with the
 * BQL held; the PVG encode itself is bounced to a background dispatch queue
 * (see the comment inside the block).
 *
 * Reference counting: texture and command_buffer are retained here and
 * released inside the dispatched block — immediately on the failure path,
 * or after commit on the success path (the queue keeps the committed buffer
 * alive until completion; the completed-handler only schedules a BH).
 */
static void apple_gfx_render_new_frame(AppleGFXState *s)
{
    bool managed_texture = s->using_managed_texture_storage;
    uint32_t width = surface_width(s->surface);
    uint32_t height = surface_height(s->surface);
    MTLRegion region = MTLRegionMake2D(0, 0, width, height);
    id<MTLCommandBuffer> command_buffer = [s->mtl_queue commandBuffer];
    id<MTLTexture> texture = s->texture;

    assert(bql_locked());
    [texture retain];
    [command_buffer retain];

    /* Recorded so the completion BH can detect an intervening mode change. */
    s->rendering_frame_width = width;
    s->rendering_frame_height = height;

    dispatch_async(get_background_queue(), ^{
        /*
         * This is not safe to call from the BQL/BH due to PVG-internal locks
         * causing deadlocks.
         */
        bool r = [s->pgdisp encodeCurrentFrameToCommandBuffer:command_buffer
                                                 texture:texture
                                                  region:region];
        if (!r) {
            [texture release];
            [command_buffer release];
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: encodeCurrentFrameToCommandBuffer:texture:region: "
                          "failed\n", __func__);
            bql_lock();
            /* Frame dropped; chain the next one if more were requested. */
            --s->pending_frames;
            if (s->pending_frames > 0) {
                apple_gfx_render_new_frame(s);
            }
            bql_unlock();
            return;
        }

        if (managed_texture) {
            /* "Managed" textures exist in both VRAM and RAM and must be synced. */
            id<MTLBlitCommandEncoder> blit = [command_buffer blitCommandEncoder];
            [blit synchronizeResource:texture];
            [blit endEncoding];
        }
        [texture release];
        [command_buffer addCompletedHandler:
            ^(id<MTLCommandBuffer> cb)
            {
                /* GPU work finished: do the BQL-side completion in a BH. */
                aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                        apple_gfx_render_frame_completed_bh, s);
            }];
        [command_buffer commit];
        [command_buffer release];
    });
}
288
/* Copies the rendered texture's pixels into the display surface's memory. */
static void copy_mtl_texture_to_surface_mem(id<MTLTexture> texture, void *vram)
{
    /*
     * TODO: Skip this entirely on a pure Metal or headless/guest-only
     * rendering path, else use a blit command encoder? Needs careful
     * (double?) buffering design.
     */
    size_t w = texture.width;
    size_t h = texture.height;

    [texture getBytes:vram
          bytesPerRow:(w * 4)
        bytesPerImage:(w * h * 4)
           fromRegion:MTLRegionMake2D(0, 0, w, h)
          mipmapLevel:0
                slice:0];
}
305
/*
 * BH run on the main AIO context (BQL held) after the GPU finished a frame:
 * copies the texture into the display surface (unless the display mode
 * changed while rendering), completes a pending async gfx_update if one is
 * outstanding, and chains the next render if more frames were requested.
 */
static void apple_gfx_render_frame_completed_bh(void *opaque)
{
    AppleGFXState *s = opaque;

    @autoreleasepool {
        --s->pending_frames;
        assert(s->pending_frames >= 0);

        /* Only update display if mode hasn't changed since we started rendering. */
        if (s->rendering_frame_width == surface_width(s->surface) &&
            s->rendering_frame_height == surface_height(s->surface)) {
            copy_mtl_texture_to_surface_mem(s->texture, surface_data(s->surface));
            if (s->gfx_update_requested) {
                /* Display core is waiting on an async update: finish it now. */
                s->gfx_update_requested = false;
                dpy_gfx_update_full(s->con);
                graphic_hw_update_done(s->con);
                s->new_frame_ready = false;
            } else {
                /* Hold the frame for the next gfx_update callback. */
                s->new_frame_ready = true;
            }
        }
        if (s->pending_frames > 0) {
            apple_gfx_render_new_frame(s);
        }
    }
}
332
/*
 * Console gfx_update callback (async variant): publishes a ready frame
 * immediately, defers completion while a render is in flight, and otherwise
 * reports completion with nothing new to show.
 */
static void apple_gfx_fb_update_display(void *opaque)
{
    AppleGFXState *s = opaque;

    assert(bql_locked());

    if (s->new_frame_ready) {
        /* A finished frame is already in the surface: show it now. */
        s->new_frame_ready = false;
        dpy_gfx_update_full(s->con);
        graphic_hw_update_done(s->con);
        return;
    }

    if (s->pending_frames > 0) {
        /* Completion deferred to apple_gfx_render_frame_completed_bh(). */
        s->gfx_update_requested = true;
        return;
    }

    graphic_hw_update_done(s->con);
}
348
/* Console callbacks; gfx_update completion is reported asynchronously. */
static const GraphicHwOps apple_gfx_fb_ops = {
    .gfx_update = apple_gfx_fb_update_display,
    .gfx_update_async = true,
};
353
354/* ------ Mouse cursor and display mode setting ------ */
355
/*
 * Switches the console to width x height pixels: allocates a fresh display
 * surface and a matching Metal texture for PVG to render into. No-op when
 * the current surface already has the requested size. Called with the BQL
 * held (from the modeChangeHandler).
 */
static void set_mode(AppleGFXState *s, uint32_t width, uint32_t height)
{
    MTLTextureDescriptor *textureDescriptor;

    if (s->surface &&
        width == surface_width(s->surface) &&
        height == surface_height(s->surface)) {
        return;
    }

    /* The old surface is disposed of by dpy_gfx_replace_surface() below. */
    [s->texture release];

    s->surface = qemu_create_displaysurface(width, height);

    @autoreleasepool {
        textureDescriptor =
            [MTLTextureDescriptor
                texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm
                                             width:width
                                            height:height
                                         mipmapped:NO];
        textureDescriptor.usage = s->pgdisp.minimumTextureUsage;
        s->texture = [s->mtl newTextureWithDescriptor:textureDescriptor];
        /* Managed-storage textures need an explicit sync pass after render. */
        s->using_managed_texture_storage =
            (s->texture.storageMode == MTLStorageModeManaged);
    }

    dpy_gfx_replace_surface(s->con, s->surface);
}
385
/* Pushes the current PVG cursor position and visibility to the console. */
static void update_cursor(AppleGFXState *s)
{
    assert(bql_locked());

    PGDisplayCoord_t pos = s->pgdisp.cursorPosition;
    dpy_mouse_set(s->con, pos.x, pos.y, qatomic_read(&s->cursor_show));
}
392
/* BH wrapper: cursor updates must run under the BQL (see update_cursor()). */
static void update_cursor_bh(void *opaque)
{
    update_cursor(opaque);
}
398
/* Parameters passed from the cursorGlyphHandler block to the set_cursor_glyph BH. */
typedef struct AppleGFXSetCursorGlyphJob {
    AppleGFXState *s;
    NSBitmapImageRep *glyph;  /* Retained by the handler; released in the BH */
    PGDisplayCoord_t hotspot;
} AppleGFXSetCursorGlyphJob;
404
/*
 * BH: converts an NSBitmapImageRep cursor glyph into a QEMUCursor and hands
 * it to the display core. Frees the job and releases the glyph reference
 * taken by the cursorGlyphHandler.
 */
static void set_cursor_glyph(void *opaque)
{
    AppleGFXSetCursorGlyphJob *job = opaque;
    AppleGFXState *s = job->s;
    NSBitmapImageRep *glyph = job->glyph;
    uint32_t bits_per_pixel = glyph.bitsPerPixel;
    size_t width = glyph.pixelsWide;
    size_t height = glyph.pixelsHigh;
    size_t row_padding = glyph.bytesPerRow - width * 4;
    const uint8_t *src = glyph.bitmapData;

    trace_apple_gfx_cursor_set(bits_per_pixel, width, height);

    if (s->cursor) {
        cursor_unref(s->cursor);
        s->cursor = NULL;
    }

    if (bits_per_pixel == 32) { /* Shouldn't be anything else, but just to be safe... */
        uint32_t *dst;

        s->cursor = cursor_alloc(width, height);
        s->cursor->hot_x = job->hotspot.x;
        s->cursor->hot_y = job->hotspot.y;

        dst = s->cursor->data;
        for (size_t row = 0; row < height; row++) {
            for (size_t col = 0; col < width; col++) {
                /*
                 * NSBitmapImageRep's red & blue channels are swapped
                 * compared to QEMUCursor's.
                 */
                *dst++ = (src[0] << 16u) |
                         (src[1] <<  8u) |
                         (src[2] <<  0u) |
                         (src[3] << 24u);
                src += 4;
            }
            src += row_padding;
        }
        dpy_cursor_define(s->con, s->cursor);
        update_cursor(s);
    }

    [glyph release];
    g_free(job);
}
453
454/* ------ DMA (device reading system memory) ------ */
455
/*
 * Synchronous guest-memory read request, executed as a BH on the main AIO
 * context; 'event' signals completion back to the requesting thread.
 */
typedef struct AppleGFXReadMemoryJob {
    QemuEvent event;          /* Signalled when the BH has finished */
    hwaddr physical_address;  /* Guest-physical source address */
    uint64_t length;          /* Number of bytes to read */
    void *dst;                /* Host destination buffer */
    bool success;             /* True iff the DMA read returned MEMTX_OK */
} AppleGFXReadMemoryJob;
463
/* BH: performs the DMA read under the BQL and signals the waiting thread. */
static void apple_gfx_do_read_memory(void *opaque)
{
    AppleGFXReadMemoryJob *job = opaque;

    job->success = (dma_memory_read(&address_space_memory,
                                    job->physical_address, job->dst,
                                    job->length,
                                    MEMTXATTRS_UNSPECIFIED) == MEMTX_OK);

    qemu_event_set(&job->event);
}
475
/*
 * Reads guest memory on behalf of the PVG framework. DMA requires the BQL,
 * so the read runs as a BH on the main AIO context while the calling
 * (framework) thread blocks on an event. Returns true on success.
 */
static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address,
                                  uint64_t length, void *dst)
{
    AppleGFXReadMemoryJob job = {
        .physical_address = physical_address,
        .length = length,
        .dst = dst,
    };

    trace_apple_gfx_read_memory(physical_address, length, dst);

    qemu_event_init(&job.event, 0);
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            apple_gfx_do_read_memory, &job);
    qemu_event_wait(&job.event);
    qemu_event_destroy(&job.event);

    return job.success;
}
493
494/* ------ Memory-mapped device I/O operations ------ */
495
/* MMIO request bounced from the BQL context to a background dispatch queue. */
typedef struct AppleGFXIOJob {
    AppleGFXState *state;
    uint64_t offset;  /* MMIO offset within the device region */
    uint64_t value;   /* Write value in / read result out */
    bool completed;   /* Set (atomically) when the queued work is done */
} AppleGFXIOJob;
502
/* Runs on a background queue: performs the MMIO read outside the BQL. */
static void apple_gfx_do_read(void *opaque)
{
    AppleGFXIOJob *req = opaque;

    req->value = [req->state->pgdev mmioReadAtOffset:req->offset];
    qatomic_set(&req->completed, true);
    aio_wait_kick(); /* Wake the AIO_WAIT_WHILE() poller. */
}
510
/*
 * MMIO read handler. See apple_gfx_write() for why the framework call is
 * made on a pool thread while this thread services AIO work.
 */
static uint64_t apple_gfx_read(void *opaque, hwaddr offset, unsigned size)
{
    AppleGFXIOJob job = {
        .state = opaque,
        .offset = offset,
        .completed = false,
    };

    dispatch_async_f(get_background_queue(), &job, apple_gfx_do_read);
    AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));

    trace_apple_gfx_read(offset, job.value);
    return job.value;
}
526
/* Runs on a background queue: performs the MMIO write outside the BQL. */
static void apple_gfx_do_write(void *opaque)
{
    AppleGFXIOJob *req = opaque;

    [req->state->pgdev mmioWriteAtOffset:req->offset value:req->value];
    qatomic_set(&req->completed, true);
    aio_wait_kick(); /* Wake the AIO_WAIT_WHILE() poller. */
}
534
/*
 * MMIO write handler.
 *
 * The methods mmioReadAtOffset: and especially mmioWriteAtOffset: can
 * trigger synchronous operations on other dispatch queues, which in turn
 * may call back out on one or more of the callback blocks. For this reason,
 * and as we are holding the BQL, we invoke the I/O methods on a pool
 * thread and handle AIO tasks while we wait. Any work in the callbacks
 * requiring the BQL will in turn schedule BHs which this thread will
 * process while waiting.
 */
static void apple_gfx_write(void *opaque, hwaddr offset, uint64_t val,
                            unsigned size)
{
    AppleGFXIOJob job = {
        .state = opaque,
        .offset = offset,
        .value = val,
        .completed = false,
    };

    dispatch_async_f(get_background_queue(), &job, apple_gfx_do_write);
    AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));

    trace_apple_gfx_write(offset, val);
}
560
/*
 * MMIO region ops. Guest accesses of 4 or 8 bytes are valid; the device
 * implementation handles 4-byte accesses, so the memory core splits any
 * 8-byte access into two.
 */
static const MemoryRegionOps apple_gfx_ops = {
    .read = apple_gfx_read,
    .write = apple_gfx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
574
/* Queries the framework's default MMIO region size via a throwaway descriptor. */
static size_t apple_gfx_get_default_mmio_range_size(void)
{
    size_t len;

    @autoreleasepool {
        PGDeviceDescriptor *desc = [PGDeviceDescriptor new];
        len = desc.mmioLength;
        [desc release];
    }
    return len;
}
585
586/* ------ Initialisation and startup ------ */
587
/*
 * Early common init: sets up the device's MMIO region, sized as the PVG
 * framework requests. The rest of the setup happens in
 * apple_gfx_common_realize().
 */
void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name)
{
    size_t mmio_len = apple_gfx_get_default_mmio_range_size();

    trace_apple_gfx_common_init(obj_name, mmio_len);
    memory_region_init_io(&s->iomem_gfx, obj, &apple_gfx_ops, s, obj_name,
                          mmio_len);

    /* TODO: PVG framework supports serialising device state: integrate it! */
}
598
/*
 * Installs the guest-memory-related callback blocks on the device
 * descriptor. All blocks capture 's'; the framework invokes them on its own
 * dispatch queues.
 */
static void apple_gfx_register_task_mapping_handlers(AppleGFXState *s,
                                                     PGDeviceDescriptor *desc)
{
    desc.createTask = ^(uint64_t vmSize, void * _Nullable * _Nonnull baseAddress) {
        PGTask_t *task = apple_gfx_new_task(s, vmSize);
        /*
         * NOTE(review): apple_gfx_new_task() returns NULL when the address
         * space reservation fails, which would crash on task->address below.
         * Confirm whether the framework tolerates a NULL task before adding
         * an error path here.
         */
        *baseAddress = (void *)task->address;
        trace_apple_gfx_create_task(vmSize, *baseAddress);
        return task;
    };

    desc.destroyTask = ^(PGTask_t * _Nonnull task) {
        trace_apple_gfx_destroy_task(task, task->mapped_regions->len);

        apple_gfx_destroy_task(s, task);
    };

    desc.mapMemory = ^bool(PGTask_t * _Nonnull task, uint32_t range_count,
                           uint64_t virtual_offset, bool read_only,
                           PGPhysicalMemoryRange_t * _Nonnull ranges) {
        return apple_gfx_task_map_memory(s, task, virtual_offset,
                                         ranges, range_count, read_only);
    };

    desc.unmapMemory = ^bool(PGTask_t * _Nonnull task, uint64_t virtual_offset,
                             uint64_t length) {
        apple_gfx_task_unmap_memory(s, task, virtual_offset, length);
        return true;
    };

    desc.readMemory = ^bool(uint64_t physical_address, uint64_t length,
                            void * _Nonnull dst) {
        return apple_gfx_read_memory(s, physical_address, length, dst);
    };
}
633
/*
 * BH scheduled by the PVG newFrameEventHandler: accounts for a newly
 * available guest frame and starts rendering if none is in flight.
 */
static void new_frame_handler_bh(void *opaque)
{
    AppleGFXState *s = opaque;

    /* Drop frames if guest gets too far ahead. */
    if (s->pending_frames >= 2) {
        return;
    }

    if (++s->pending_frames == 1) {
        /*
         * No render currently in flight, so kick one off. Otherwise, the
         * in-flight frame's completion BH will chain the next render.
         */
        @autoreleasepool {
            apple_gfx_render_new_frame(s);
        }
    }
}
651
/*
 * Builds the PGDisplayDescriptor with all display event handler blocks.
 * Handlers run on the main dispatch queue; work that needs the BQL is
 * bounced to a BH on the main AIO context. The caller releases the returned
 * descriptor once the display has been created.
 */
static PGDisplayDescriptor *apple_gfx_prepare_display_descriptor(AppleGFXState *s)
{
    PGDisplayDescriptor *disp_desc = [PGDisplayDescriptor new];

    disp_desc.name = @"QEMU display";
    disp_desc.sizeInMillimeters = NSMakeSize(400., 300.); /* A 20" display */
    disp_desc.queue = dispatch_get_main_queue();
    disp_desc.newFrameEventHandler = ^(void) {
        trace_apple_gfx_new_frame();
        /* Frame pacing state is BQL-protected, so handle in a BH. */
        aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh, s);
    };
    disp_desc.modeChangeHandler = ^(PGDisplayCoord_t sizeInPixels,
                                    OSType pixelFormat) {
        trace_apple_gfx_mode_change(sizeInPixels.x, sizeInPixels.y);

        BQL_LOCK_GUARD();
        set_mode(s, sizeInPixels.x, sizeInPixels.y);
    };
    disp_desc.cursorGlyphHandler = ^(NSBitmapImageRep *glyph,
                                     PGDisplayCoord_t hotspot) {
        AppleGFXSetCursorGlyphJob *job = g_malloc0(sizeof(*job));
        job->s = s;
        job->glyph = glyph;
        job->hotspot = hotspot;
        /* Balanced by the release in set_cursor_glyph(). */
        [glyph retain];
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                set_cursor_glyph, job);
    };
    disp_desc.cursorShowHandler = ^(BOOL show) {
        trace_apple_gfx_cursor_show(show);
        qatomic_set(&s->cursor_show, show);
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                update_cursor_bh, s);
    };
    disp_desc.cursorMoveHandler = ^(void) {
        trace_apple_gfx_cursor_move();
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                update_cursor_bh, s);
    };

    return disp_desc;
}
694
/*
 * Converts an array of AppleGFXDisplayMode into the PGDisplayMode objects
 * the framework expects. Caller owns (and must release) the returned array.
 */
static NSArray<PGDisplayMode *> *apple_gfx_create_display_mode_array(
    const AppleGFXDisplayMode display_modes[], uint32_t display_mode_count)
{
    NSMutableArray<PGDisplayMode *> *modes =
        [[NSMutableArray alloc] initWithCapacity:display_mode_count];

    for (uint32_t i = 0; i < display_mode_count; i++) {
        const AppleGFXDisplayMode *m = &display_modes[i];
        PGDisplayCoord_t size_px = { m->width_px, m->height_px };
        PGDisplayMode *obj;

        trace_apple_gfx_display_mode(i, m->width_px, m->height_px);

        obj = [[PGDisplayMode alloc] initWithSizeInPixels:size_px
                                          refreshRateInHz:m->refresh_rate_hz];
        [modes addObject:obj];
        [obj release];
    }

    return modes;
}
716
/*
 * Picks a Metal device for rendering and returns it with a +1 reference
 * (caller releases). Preference order: first unified-memory GPU, else the
 * last non-removable GPU seen, else the system default device.
 */
static id<MTLDevice> copy_suitable_metal_device(void)
{
    NSArray<id<MTLDevice>> *all_devices = MTLCopyAllDevices();
    id<MTLDevice> chosen = nil;

    /* Prefer a unified memory GPU. Failing that, pick a non-removable GPU. */
    for (id<MTLDevice> candidate in all_devices) {
        if (candidate.hasUnifiedMemory) {
            chosen = candidate;
            break;
        }
        if (!candidate.removable) {
            chosen = candidate;
        }
    }

    if (chosen != nil) {
        /* Take our own reference; the array's reference goes away below. */
        [chosen retain];
    } else {
        chosen = MTLCreateSystemDefaultDevice();
    }
    [all_devices release];

    return chosen;
}
742
/*
 * Common realize for apple-gfx device variants: installs the migration
 * blocker, sets up the Metal device and command queue, creates the PVG
 * device from the (subclass-prepared) descriptor, creates its single
 * display, configures the advertised mode list, and registers the QEMU
 * console. Returns false (with errp set) only if migration blocking fails.
 */
bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev,
                              PGDeviceDescriptor *desc, Error **errp)
{
    PGDisplayDescriptor *disp_desc;
    const AppleGFXDisplayMode *display_modes = apple_gfx_default_modes;
    uint32_t num_display_modes = ARRAY_SIZE(apple_gfx_default_modes);
    NSArray<PGDisplayMode *> *mode_array;

    /* Device state cannot be migrated yet; see TODO in common_init. */
    if (apple_gfx_mig_blocker == NULL) {
        error_setg(&apple_gfx_mig_blocker,
                  "Migration state blocked by apple-gfx display device");
        if (migrate_add_blocker(&apple_gfx_mig_blocker, errp) < 0) {
            return false;
        }
    }

    qemu_mutex_init(&s->task_mutex);
    QTAILQ_INIT(&s->tasks);
    s->mtl = copy_suitable_metal_device();
    s->mtl_queue = [s->mtl newCommandQueue];

    desc.device = s->mtl;

    apple_gfx_register_task_mapping_handlers(s, desc);

    s->cursor_show = true;

    s->pgdev = PGNewDeviceWithDescriptor(desc);

    disp_desc = apple_gfx_prepare_display_descriptor(s);
    /*
     * Although the framework does, this integration currently does not support
     * multiple virtual displays connected to a single PV graphics device.
     * It is however possible to create
     * more than one instance of the device, each with one display. The macOS
     * guest will ignore these displays if they share the same serial number,
     * so ensure each instance gets a unique one.
     */
    s->pgdisp = [s->pgdev newDisplayWithDescriptor:disp_desc
                                              port:0
                                         serialNum:next_pgdisplay_serial_num++];
    [disp_desc release];

    /* Modes given via the display-modes property override the defaults. */
    if (s->display_modes != NULL && s->num_display_modes > 0) {
        trace_apple_gfx_common_realize_modes_property(s->num_display_modes);
        display_modes = s->display_modes;
        num_display_modes = s->num_display_modes;
    }
    s->pgdisp.modeList = mode_array =
        apple_gfx_create_display_mode_array(display_modes, num_display_modes);
    [mode_array release];

    s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s);
    return true;
}
798
799/* ------ Display mode list device property ------ */
800
/*
 * qdev property getter: formats a display mode as
 * "<width>x<height>@<refresh-rate>" and emits it via the visitor.
 */
static void apple_gfx_get_display_mode(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    Property *prop = opaque;
    AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop);
    /* 3 uint16s (max 5 digits) + 2 separator characters + nul. */
    char buffer[5 * 3 + 2 + 1];
    char *pos = buffer;
    int written;

    written = snprintf(buffer, sizeof(buffer),
                       "%"PRIu16"x%"PRIu16"@%"PRIu16,
                       mode->width_px, mode->height_px,
                       mode->refresh_rate_hz);
    assert(written < sizeof(buffer));

    visit_type_str(v, name, &pos, errp);
}
819
/*
 * qdev property setter: parses "<width>x<height>@<refresh-rate>" into an
 * AppleGFXDisplayMode. Each component must be a decimal integer in
 * 1..65535. On any parse error, errp is set and *mode is left untouched.
 *
 * Fixes over the previous version: trailing characters after the refresh
 * rate are now rejected (e.g. "1024x768@60xyz" used to be accepted), and
 * the mode struct is only written once the whole string has validated, so
 * a failed parse no longer leaves a half-updated mode behind.
 */
static void apple_gfx_set_display_mode(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    Property *prop = opaque;
    AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop);
    const char *endptr;
    g_autofree char *str = NULL;
    int ret;
    int width, height, refresh_rate;

    if (!visit_type_str(v, name, &str, errp)) {
        return;
    }

    endptr = str;

    ret = qemu_strtoi(endptr, &endptr, 10, &width);
    if (ret || width > UINT16_MAX || width <= 0) {
        error_setg(errp, "width in '%s' must be a decimal integer number"
                         " of pixels in the range 1..65535", name);
        return;
    }
    if (*endptr != 'x') {
        goto separator_error;
    }

    ret = qemu_strtoi(endptr + 1, &endptr, 10, &height);
    if (ret || height > UINT16_MAX || height <= 0) {
        error_setg(errp, "height in '%s' must be a decimal integer number"
                         " of pixels in the range 1..65535", name);
        return;
    }
    if (*endptr != '@') {
        goto separator_error;
    }

    ret = qemu_strtoi(endptr + 1, &endptr, 10, &refresh_rate);
    if (ret || refresh_rate > UINT16_MAX || refresh_rate <= 0) {
        error_setg(errp, "refresh rate in '%s'"
                         " must be a positive decimal integer (Hertz)", name);
        return;
    }
    if (*endptr != '\0') {
        /* Reject trailing garbage such as "1024x768@60xyz". */
        goto separator_error;
    }

    /* Commit only after the entire string has parsed successfully. */
    mode->width_px = width;
    mode->height_px = height;
    mode->refresh_rate_hz = refresh_rate;
    return;

separator_error:
    error_setg(errp,
               "Each display mode takes the format '<width>x<height>@<rate>'");
}
872
/* qdev property type backing the per-element display-modes array property. */
const PropertyInfo qdev_prop_apple_gfx_display_mode = {
    .type  = "display_mode",
    .description =
        "Display mode in pixels and Hertz, as <width>x<height>@<refresh-rate> "
        "Example: 3840x2160@60",
    .get   = apple_gfx_get_display_mode,
    .set   = apple_gfx_set_display_mode,
};
881