/*
 * vmnet-common.m - network client wrapper for Apple vmnet.framework
 *
 * Copyright(c) 2022 Vladislav Yaroshchuk <vladislav.yaroshchuk@jetbrains.com>
 * Copyright(c) 2021 Phillip Tennen <phillip@axleos.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "qapi/qapi-types-net.h"
#include "vmnet_int.h"
#include "clients.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "system/runstate.h"
#include "net/eth.h"

#include <vmnet/vmnet.h>
#include <dispatch/dispatch.h>


static void vmnet_send_completed(NetClientState *nc, ssize_t len);


const char *vmnet_status_map_str(vmnet_return_t status)
{
    switch (status) {
    case VMNET_SUCCESS:
        return "success";
    case VMNET_FAILURE:
        return "general failure (possibly not enough privileges)";
    case VMNET_MEM_FAILURE:
        return "memory allocation failure";
    case VMNET_INVALID_ARGUMENT:
        return "invalid argument specified";
    case VMNET_SETUP_INCOMPLETE:
        return "interface setup is not complete";
    case VMNET_INVALID_ACCESS:
        return "invalid access, permission denied";
    case VMNET_PACKET_TOO_BIG:
        return "packet size is larger than MTU";
    case VMNET_BUFFER_EXHAUSTED:
        return "buffers exhausted in kernel";
    case VMNET_TOO_MANY_PACKETS:
        return "packet count exceeds limit";
    case VMNET_SHARING_SERVICE_BUSY:
        return "conflict, sharing service is in use";
    default:
        return "unknown vmnet error";
    }
}


/**
 * Write packets from QEMU to the vmnet interface.
 *
 * vmnet.framework supports iov, but writing more than
 * one iov into the vmnet interface fails with
 * 'VMNET_INVALID_ARGUMENT'. Collecting the provided iovs into
 * one and passing it to vmnet works fine. That is why
 * receive_iov() is left unimplemented; performance is still
 * good with .receive() only.
 */
ssize_t vmnet_receive_common(NetClientState *nc,
                             const uint8_t *buf,
                             size_t size)
{
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
    struct vmpktdesc packet;
    struct iovec iov;
    int pkt_cnt;
    vmnet_return_t if_status;

    if (size > s->max_packet_size) {
        warn_report("vmnet: packet is too big, %zu > %" PRIu64,
                    size, s->max_packet_size);
        return -1;
    }

    iov.iov_base = (char *) buf;
    iov.iov_len = size;

    packet.vm_pkt_iovcnt = 1;
    packet.vm_flags = 0;
    packet.vm_pkt_size = size;
    packet.vm_pkt_iov = &iov;
    pkt_cnt = 1;

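    /*
     * vmnet_write() takes the packet count by pointer and updates it to
     * the number of packets actually written.
     */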
    if_status = vmnet_write(s->vmnet_if, &packet, &pkt_cnt);
    if (if_status != VMNET_SUCCESS) {
        error_report("vmnet: write error: %s",
                     vmnet_status_map_str(if_status));
        return -1;
    }

    if (pkt_cnt) {
        return size;
    }
    return 0;
}


/**
 * Read packets from the vmnet interface and write them
 * to temporary buffers in VmnetState.
 *
 * Returns the number of packets read (may be 0) on success,
 * -1 on error.
 */
static int vmnet_read_packets(VmnetState *s)
{
    assert(s->packets_send_current_pos == s->packets_send_end_pos);

    struct vmpktdesc *packets = s->packets_buf;
    vmnet_return_t status;
    int i;

    /* Read as many packets as present */
    s->packets_send_current_pos = 0;
    s->packets_send_end_pos = VMNET_PACKETS_LIMIT;
    for (i = 0; i < s->packets_send_end_pos; ++i) {
        packets[i].vm_pkt_size = s->max_packet_size;
        packets[i].vm_pkt_iovcnt = 1;
        packets[i].vm_flags = 0;
    }

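    /*
     * vmnet_read() takes the capacity in packets_send_end_pos and updates
     * it to the number of packets actually read.
     */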
    status = vmnet_read(s->vmnet_if, packets, &s->packets_send_end_pos);
    if (status != VMNET_SUCCESS) {
        error_printf("vmnet: read failed: %s\n",
                     vmnet_status_map_str(status));
        s->packets_send_current_pos = 0;
        s->packets_send_end_pos = 0;
        return -1;
    }
    return s->packets_send_end_pos;
}


/**
 * Write packets from temporary buffers in VmnetState
 * to QEMU.
 */
static void vmnet_write_packets_to_qemu(VmnetState *s)
{
    uint8_t *pkt;
    size_t pktsz;
    uint8_t min_pkt[ETH_ZLEN];
    size_t min_pktsz;
    ssize_t size;

    while (s->packets_send_current_pos < s->packets_send_end_pos) {
        pkt = s->iov_buf[s->packets_send_current_pos].iov_base;
        pktsz = s->packets_buf[s->packets_send_current_pos].vm_pkt_size;

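        /* Pad frames below the Ethernet minimum if the peer needs it */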
        if (net_peer_needs_padding(&s->nc)) {
            min_pktsz = sizeof(min_pkt);

            if (eth_pad_short_frame(min_pkt, &min_pktsz, pkt, pktsz)) {
                pkt = min_pkt;
                pktsz = min_pktsz;
            }
        }

        size = qemu_send_packet_async(&s->nc, pkt, pktsz,
                                      vmnet_send_completed);

        if (size == 0) {
            /*
             * QEMU is not ready to consume more packets -
             * stop and wait for the completion callback call
             */
            return;
        }
        ++s->packets_send_current_pos;
    }
}


/**
 * Bottom half callback that transfers packets from the vmnet interface
 * to QEMU.
 *
 * The process of transferring packets has three stages:
 * 1. Handle the vmnet event;
 * 2. Read packets from the vmnet interface into a temporary buffer;
 * 3. Write packets from the temporary buffer to QEMU.
 *
 * QEMU may suspend this process at the last stage by returning 0 from
 * qemu_send_packet_async(). If this happens, we must wait until QEMU
 * is ready to consume more packets, write the remaining ones from the
 * temporary buffer, and only then continue reading packets from the
 * vmnet interface.
 *
 * Packets to be transferred are stored in packets_buf,
 * in the window [packets_send_current_pos..packets_send_end_pos),
 * including current_pos, excluding end_pos.
 *
 * Thus, if QEMU is not ready, the buffer is not refilled and
 * packets_send_current_pos < packets_send_end_pos.
 */
static void vmnet_send_bh(void *opaque)
{
    NetClientState *nc = (NetClientState *) opaque;
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);

    /*
     * Do nothing if QEMU is not ready - wait
     * for completion callback invocation
     */
    if (s->packets_send_current_pos < s->packets_send_end_pos) {
        return;
    }

    /* Read packets from vmnet interface */
    if (vmnet_read_packets(s) > 0) {
        /* Send them to QEMU */
        vmnet_write_packets_to_qemu(s);
    }
}


/**
 * Completion callback to be invoked by QEMU when it becomes
 * ready to consume more packets.
 */
static void vmnet_send_completed(NetClientState *nc, ssize_t len)
{
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);

    /* The callback is invoked once a queued packet has been sent */
    ++s->packets_send_current_pos;

    /* Complete sending packets left in VmnetState buffers */
    vmnet_write_packets_to_qemu(s);

    /* And read new ones from vmnet if VmnetState buffer is ready */
    if (s->packets_send_current_pos < s->packets_send_end_pos) {
        qemu_bh_schedule(s->send_bh);
    }
}


static void vmnet_bufs_init(VmnetState *s)
{
    struct vmpktdesc *packets = s->packets_buf;
    struct iovec *iov = s->iov_buf;
    int i;

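    /* Preallocate one max_packet_size buffer per packet slot */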
    for (i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
        iov[i].iov_len = s->max_packet_size;
        iov[i].iov_base = g_malloc0(iov[i].iov_len);
        packets[i].vm_pkt_iov = iov + i;
    }
}

/**
 * Called on VM state change to unregister/re-register the packet handler
 */
static void vmnet_vm_state_change_cb(void *opaque, bool running, RunState state)
{
    VmnetState *s = opaque;

    if (running) {
        vmnet_interface_set_event_callback(
            s->vmnet_if,
            VMNET_INTERFACE_PACKETS_AVAILABLE,
            s->if_queue,
            ^(interface_event_t event_id, xpc_object_t event) {
                assert(event_id == VMNET_INTERFACE_PACKETS_AVAILABLE);
                /*
                 * This function is called from a non-QEMU thread, so
                 * we only schedule a BH and do the rest of the I/O
                 * completion handling from vmnet_send_bh(), which runs
                 * in a QEMU context.
                 */
                qemu_bh_schedule(s->send_bh);
            });
    } else {
        vmnet_interface_set_event_callback(
            s->vmnet_if,
            VMNET_INTERFACE_PACKETS_AVAILABLE,
            NULL,
            NULL);
    }
}

int vmnet_if_create(NetClientState *nc,
                    xpc_object_t if_desc,
                    Error **errp)
{
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
    dispatch_semaphore_t if_created_sem = dispatch_semaphore_create(0);
    __block vmnet_return_t if_status;

    s->if_queue = dispatch_queue_create(
        "org.qemu.vmnet.if_queue",
        DISPATCH_QUEUE_SERIAL
    );

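    /* QEMU manages the MAC address itself, so ask vmnet not to allocate one */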
    xpc_dictionary_set_bool(
        if_desc,
        vmnet_allocate_mac_address_key,
        false
    );

#ifdef DEBUG
    qemu_log("vmnet.start.interface_desc:\n");
    xpc_dictionary_apply(if_desc,
                         ^bool(const char *k, xpc_object_t v) {
                             char *desc = xpc_copy_description(v);
                             qemu_log("  %s=%s\n", k, desc);
                             free(desc);
                             return true;
                         });
#endif /* DEBUG */

    s->vmnet_if = vmnet_start_interface(
        if_desc,
        s->if_queue,
        ^(vmnet_return_t status, xpc_object_t interface_param) {
            if_status = status;
            if (status != VMNET_SUCCESS || !interface_param) {
                dispatch_semaphore_signal(if_created_sem);
                return;
            }

#ifdef DEBUG
            qemu_log("vmnet.start.interface_param:\n");
            xpc_dictionary_apply(interface_param,
                                 ^bool(const char *k, xpc_object_t v) {
                                     char *desc = xpc_copy_description(v);
                                     qemu_log("  %s=%s\n", k, desc);
                                     free(desc);
                                     return true;
                                 });
#endif /* DEBUG */

            s->mtu = xpc_dictionary_get_uint64(
                interface_param,
                vmnet_mtu_key);
            s->max_packet_size = xpc_dictionary_get_uint64(
                interface_param,
                vmnet_max_packet_size_key);

            dispatch_semaphore_signal(if_created_sem);
        });

    if (s->vmnet_if == NULL) {
        dispatch_release(s->if_queue);
        dispatch_release(if_created_sem);
        error_setg(errp,
                   "unable to create interface with requested params");
        return -1;
    }

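    /* Wait until the start handler above has run and reported if_status */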
    dispatch_semaphore_wait(if_created_sem, DISPATCH_TIME_FOREVER);
    dispatch_release(if_created_sem);

    if (if_status != VMNET_SUCCESS) {
        dispatch_release(s->if_queue);
        error_setg(errp,
                   "cannot create vmnet interface: %s",
                   vmnet_status_map_str(if_status));
        return -1;
    }

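    /* The bottom half moves packet handling into the QEMU main loop */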
    s->send_bh = aio_bh_new(qemu_get_aio_context(), vmnet_send_bh, nc);
    vmnet_bufs_init(s);

    s->packets_send_current_pos = 0;
    s->packets_send_end_pos = 0;

    vmnet_vm_state_change_cb(s, 1, RUN_STATE_RUNNING);

    s->change = qemu_add_vm_change_state_handler(vmnet_vm_state_change_cb, s);

    return 0;
}


void vmnet_cleanup_common(NetClientState *nc)
{
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
    dispatch_semaphore_t if_stopped_sem;

    if (s->vmnet_if == NULL) {
        return;
    }

    vmnet_vm_state_change_cb(s, 0, RUN_STATE_SHUTDOWN);
    qemu_del_vm_change_state_handler(s->change);
    if_stopped_sem = dispatch_semaphore_create(0);
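    /* Stop the interface and wait for the stop handler to signal completion */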
    vmnet_stop_interface(
        s->vmnet_if,
        s->if_queue,
        ^(vmnet_return_t status) {
            assert(status == VMNET_SUCCESS);
            dispatch_semaphore_signal(if_stopped_sem);
        });
    dispatch_semaphore_wait(if_stopped_sem, DISPATCH_TIME_FOREVER);

    qemu_purge_queued_packets(nc);

    qemu_bh_delete(s->send_bh);
    dispatch_release(if_stopped_sem);
    dispatch_release(s->if_queue);

    for (int i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
        g_free(s->iov_buf[i].iov_base);
    }
}
