1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * virtio-snd: Virtio sound device
4 * Copyright (C) 2021 OpenSynergy GmbH
5 */
6 #include <sound/pcm_params.h>
7
8 #include "virtio_card.h"
9
/**
 * struct virtio_pcm_msg - VirtIO I/O message.
 * @substream: VirtIO PCM substream that owns this message.
 * @xfer: Request header payload (stream ID), mapped by sgs[PCM_MSG_SG_XFER].
 * @status: Response header payload written by the device, mapped by
 *          sgs[PCM_MSG_SG_STATUS].
 * @length: Data length in bytes accumulated so far for this period.
 * @sgs: Payload scatter-gather table: one element for @xfer, one for @status,
 *       and a variable number of elements for the period data buffer
 *       (see enum pcm_msg_sg_index for the fixed slots).
 */
struct virtio_pcm_msg {
	struct virtio_pcm_substream *substream;
	struct virtio_snd_pcm_xfer xfer;
	struct virtio_snd_pcm_status status;
	size_t length;
	struct scatterlist sgs[];
};
25
/**
 * enum pcm_msg_sg_index - Index values for the virtio_pcm_msg->sgs field in
 * an I/O message.
 * @PCM_MSG_SG_XFER: Element containing a virtio_snd_pcm_xfer structure.
 * @PCM_MSG_SG_STATUS: Element containing a virtio_snd_pcm_status structure.
 * @PCM_MSG_SG_DATA: The first element containing a data buffer (the data
 *                   part may span multiple further elements).
 */
enum pcm_msg_sg_index {
	PCM_MSG_SG_XFER = 0,
	PCM_MSG_SG_STATUS,
	PCM_MSG_SG_DATA
};
38
39 /**
40 * virtsnd_pcm_sg_num() - Count the number of sg-elements required to represent
41 * vmalloc'ed buffer.
42 * @data: Pointer to vmalloc'ed buffer.
43 * @length: Buffer size.
44 *
45 * Context: Any context.
46 * Return: Number of physically contiguous parts in the @data.
47 */
virtsnd_pcm_sg_num(u8 * data,unsigned int length)48 static int virtsnd_pcm_sg_num(u8 *data, unsigned int length)
49 {
50 phys_addr_t sg_address;
51 unsigned int sg_length;
52 int num = 0;
53
54 while (length) {
55 struct page *pg = vmalloc_to_page(data);
56 phys_addr_t pg_address = page_to_phys(pg);
57 size_t pg_length;
58
59 pg_length = PAGE_SIZE - offset_in_page(data);
60 if (pg_length > length)
61 pg_length = length;
62
63 if (!num || sg_address + sg_length != pg_address) {
64 sg_address = pg_address;
65 sg_length = pg_length;
66 num++;
67 } else {
68 sg_length += pg_length;
69 }
70
71 data += pg_length;
72 length -= pg_length;
73 }
74
75 return num;
76 }
77
78 /**
79 * virtsnd_pcm_sg_from() - Build sg-list from vmalloc'ed buffer.
80 * @sgs: Preallocated sg-list to populate.
81 * @nsgs: The maximum number of elements in the @sgs.
82 * @data: Pointer to vmalloc'ed buffer.
83 * @length: Buffer size.
84 *
85 * Splits the buffer into physically contiguous parts and makes an sg-list of
86 * such parts.
87 *
88 * Context: Any context.
89 */
virtsnd_pcm_sg_from(struct scatterlist * sgs,int nsgs,u8 * data,unsigned int length)90 static void virtsnd_pcm_sg_from(struct scatterlist *sgs, int nsgs, u8 *data,
91 unsigned int length)
92 {
93 int idx = -1;
94
95 while (length) {
96 struct page *pg = vmalloc_to_page(data);
97 size_t pg_length;
98
99 pg_length = PAGE_SIZE - offset_in_page(data);
100 if (pg_length > length)
101 pg_length = length;
102
103 if (idx == -1 ||
104 sg_phys(&sgs[idx]) + sgs[idx].length != page_to_phys(pg)) {
105 if (idx + 1 == nsgs)
106 break;
107 sg_set_page(&sgs[++idx], pg, pg_length,
108 offset_in_page(data));
109 } else {
110 sgs[idx].length += pg_length;
111 }
112
113 data += pg_length;
114 length -= pg_length;
115 }
116
117 sg_mark_end(&sgs[idx]);
118 }
119
120 /**
121 * virtsnd_pcm_msg_alloc() - Allocate I/O messages.
122 * @vss: VirtIO PCM substream.
123 * @periods: Current number of periods.
124 * @period_bytes: Current period size in bytes.
125 *
126 * The function slices the buffer into @periods parts (each with the size of
127 * @period_bytes), and creates @periods corresponding I/O messages.
128 *
129 * Context: Any context that permits to sleep.
130 * Return: 0 on success, -ENOMEM on failure.
131 */
virtsnd_pcm_msg_alloc(struct virtio_pcm_substream * vss,unsigned int periods,unsigned int period_bytes)132 int virtsnd_pcm_msg_alloc(struct virtio_pcm_substream *vss,
133 unsigned int periods, unsigned int period_bytes)
134 {
135 struct snd_pcm_runtime *runtime = vss->substream->runtime;
136 unsigned int i;
137
138 vss->msgs = kzalloc_objs(*vss->msgs, periods);
139 if (!vss->msgs)
140 return -ENOMEM;
141
142 vss->nmsgs = periods;
143
144 for (i = 0; i < periods; ++i) {
145 u8 *data = runtime->dma_area + period_bytes * i;
146 int sg_num = virtsnd_pcm_sg_num(data, period_bytes);
147 struct virtio_pcm_msg *msg;
148
149 msg = kzalloc_flex(*msg, sgs, sg_num + 2);
150 if (!msg)
151 return -ENOMEM;
152
153 msg->substream = vss;
154 sg_init_one(&msg->sgs[PCM_MSG_SG_XFER], &msg->xfer,
155 sizeof(msg->xfer));
156 sg_init_one(&msg->sgs[PCM_MSG_SG_STATUS], &msg->status,
157 sizeof(msg->status));
158 virtsnd_pcm_sg_from(&msg->sgs[PCM_MSG_SG_DATA], sg_num, data,
159 period_bytes);
160
161 vss->msgs[i] = msg;
162 }
163
164 return 0;
165 }
166
167 /**
168 * virtsnd_pcm_msg_free() - Free all allocated I/O messages.
169 * @vss: VirtIO PCM substream.
170 *
171 * Context: Any context.
172 */
virtsnd_pcm_msg_free(struct virtio_pcm_substream * vss)173 void virtsnd_pcm_msg_free(struct virtio_pcm_substream *vss)
174 {
175 unsigned int i;
176
177 for (i = 0; vss->msgs && i < vss->nmsgs; ++i)
178 kfree(vss->msgs[i]);
179 kfree(vss->msgs);
180
181 vss->msgs = NULL;
182 vss->nmsgs = 0;
183 }
184
/**
 * virtsnd_pcm_msg_send() - Send asynchronous I/O messages.
 * @vss: VirtIO PCM substream.
 * @offset: starting position that has been updated
 * @bytes: number of bytes that has been updated
 *
 * All messages are organized in an ordered circular list. Each time the
 * function is called, all currently non-enqueued messages are added to the
 * virtqueue. For this, the function uses offset and bytes to calculate the
 * messages that need to be added.
 *
 * Context: Any context. Expects the tx/rx queue and the VirtIO substream
 * spinlocks to be held by caller.
 * Return: 0 on success, -errno on failure.
 */
int virtsnd_pcm_msg_send(struct virtio_pcm_substream *vss, unsigned long offset,
			 unsigned long bytes)
{
	struct virtio_snd *snd = vss->snd;
	struct virtio_device *vdev = snd->vdev;
	struct virtqueue *vqueue = virtsnd_pcm_queue(vss)->vqueue;
	unsigned long period_bytes = snd_pcm_lib_period_bytes(vss->substream);
	unsigned long start, end, i;
	/* Snapshot to detect below whether anything was actually enqueued. */
	unsigned int msg_count = vss->msg_count;
	bool notify = false;
	int rc;

	/* Index range of the period-sized messages touched by [offset, offset+bytes). */
	start = offset / period_bytes;
	end = (offset + bytes - 1) / period_bytes;

	for (i = start; i <= end; i++) {
		struct virtio_pcm_msg *msg = vss->msgs[i];
		/*
		 * Descriptor order on the ring: request header, data buffer,
		 * status. The out/in split below depends on this order.
		 */
		struct scatterlist *psgs[] = {
			&msg->sgs[PCM_MSG_SG_XFER],
			&msg->sgs[PCM_MSG_SG_DATA],
			&msg->sgs[PCM_MSG_SG_STATUS]
		};
		unsigned long n;

		/* Bytes contributed to this message (up to the period end). */
		n = period_bytes - (offset % period_bytes);
		if (n > bytes)
			n = bytes;

		msg->length += n;
		/* Only enqueue a message once its full period is filled. */
		if (msg->length == period_bytes) {
			msg->xfer.stream_id = cpu_to_le32(vss->sid);
			memset(&msg->status, 0, sizeof(msg->status));

			/*
			 * Playback: xfer+data are device-readable (2 out),
			 * status is device-writable (1 in). Capture: only
			 * xfer is out, data+status are in.
			 * GFP_ATOMIC: caller holds spinlocks.
			 */
			if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK)
				rc = virtqueue_add_sgs(vqueue, psgs, 2, 1, msg,
						       GFP_ATOMIC);
			else
				rc = virtqueue_add_sgs(vqueue, psgs, 1, 2, msg,
						       GFP_ATOMIC);

			if (rc) {
				dev_err(&vdev->dev,
					"SID %u: failed to send I/O message\n",
					vss->sid);
				return rc;
			}

			vss->msg_count++;
		}

		/* Subsequent messages always start at a period boundary. */
		offset = 0;
		bytes -= n;
	}

	/* Nothing enqueued this call - no need to kick the device. */
	if (msg_count == vss->msg_count)
		return 0;

	/* In polling mode the device is not notified at all. */
	if (!(vss->features & (1U << VIRTIO_SND_PCM_F_MSG_POLLING)))
		notify = virtqueue_kick_prepare(vqueue);

	if (notify)
		virtqueue_notify(vqueue);

	return 0;
}
265
266 /**
267 * virtsnd_pcm_msg_pending_num() - Returns the number of pending I/O messages.
268 * @vss: VirtIO substream.
269 *
270 * Context: Any context.
271 * Return: Number of messages.
272 */
virtsnd_pcm_msg_pending_num(struct virtio_pcm_substream * vss)273 unsigned int virtsnd_pcm_msg_pending_num(struct virtio_pcm_substream *vss)
274 {
275 guard(spinlock_irqsave)(&vss->lock);
276 return vss->msg_count;
277 }
278
/**
 * virtsnd_pcm_msg_complete() - Complete an I/O message.
 * @msg: I/O message.
 * @written_bytes: Number of bytes written to the message.
 *
 * Completion of the message means the elapsed period. If transmission is
 * allowed, then each completed message is immediately placed back at the end
 * of the queue.
 *
 * For the playback substream, @written_bytes is equal to sizeof(msg->status).
 *
 * For the capture substream, @written_bytes is equal to sizeof(msg->status)
 * plus the number of captured bytes.
 *
 * Context: Interrupt context. Takes and releases the VirtIO substream spinlock.
 */
static void virtsnd_pcm_msg_complete(struct virtio_pcm_msg *msg,
				     size_t written_bytes)
{
	struct virtio_pcm_substream *vss = msg->substream;

	/*
	 * hw_ptr always indicates the buffer position of the first I/O message
	 * in the virtqueue. Therefore, on each completion of an I/O message,
	 * the hw_ptr value is unconditionally advanced.
	 */
	guard(spinlock)(&vss->lock);
	/*
	 * If the capture substream returned an incorrect status, then just
	 * increase the hw_ptr by the message size.
	 */
	if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK ||
	    written_bytes <= sizeof(msg->status))
		vss->hw_ptr += msg->length;
	else
		vss->hw_ptr += written_bytes - sizeof(msg->status);

	/* Wrap around the circular buffer. */
	if (vss->hw_ptr >= vss->buffer_bytes)
		vss->hw_ptr -= vss->buffer_bytes;

	/* Reset the accumulator so the message can be refilled and resent. */
	msg->length = 0;

	vss->xfer_xrun = false;
	vss->msg_count--;

	if (vss->xfer_enabled) {
		struct snd_pcm_runtime *runtime = vss->substream->runtime;

		/* Report the device-side latency back to ALSA in frames. */
		runtime->delay =
			bytes_to_frames(runtime,
					le32_to_cpu(msg->status.latency_bytes));

		schedule_work(&vss->elapsed_period);
	} else if (!vss->msg_count) {
		/* Stream is stopping: the last in-flight message drained. */
		wake_up_all(&vss->msg_empty);
	}
}
336
337 /**
338 * virtsnd_pcm_notify_cb() - Process all completed I/O messages.
339 * @queue: Underlying tx/rx virtqueue.
340 *
341 * Context: Interrupt context. Takes and releases the tx/rx queue spinlock.
342 */
virtsnd_pcm_notify_cb(struct virtio_snd_queue * queue)343 static inline void virtsnd_pcm_notify_cb(struct virtio_snd_queue *queue)
344 {
345 struct virtio_pcm_msg *msg;
346 u32 written_bytes;
347
348 guard(spinlock_irqsave)(&queue->lock);
349 do {
350 virtqueue_disable_cb(queue->vqueue);
351 while ((msg = virtqueue_get_buf(queue->vqueue, &written_bytes)))
352 virtsnd_pcm_msg_complete(msg, written_bytes);
353 } while (!virtqueue_enable_cb(queue->vqueue));
354 }
355
356 /**
357 * virtsnd_pcm_tx_notify_cb() - Process all completed TX messages.
358 * @vqueue: Underlying tx virtqueue.
359 *
360 * Context: Interrupt context.
361 */
virtsnd_pcm_tx_notify_cb(struct virtqueue * vqueue)362 void virtsnd_pcm_tx_notify_cb(struct virtqueue *vqueue)
363 {
364 struct virtio_snd *snd = vqueue->vdev->priv;
365
366 virtsnd_pcm_notify_cb(virtsnd_tx_queue(snd));
367 }
368
369 /**
370 * virtsnd_pcm_rx_notify_cb() - Process all completed RX messages.
371 * @vqueue: Underlying rx virtqueue.
372 *
373 * Context: Interrupt context.
374 */
virtsnd_pcm_rx_notify_cb(struct virtqueue * vqueue)375 void virtsnd_pcm_rx_notify_cb(struct virtqueue *vqueue)
376 {
377 struct virtio_snd *snd = vqueue->vdev->priv;
378
379 virtsnd_pcm_notify_cb(virtsnd_rx_queue(snd));
380 }
381
382 /**
383 * virtsnd_pcm_ctl_msg_alloc() - Allocate and initialize the PCM device control
384 * message for the specified substream.
385 * @vss: VirtIO PCM substream.
386 * @command: Control request code (VIRTIO_SND_R_PCM_XXX).
387 * @gfp: Kernel flags for memory allocation.
388 *
389 * Context: Any context. May sleep if @gfp flags permit.
390 * Return: Allocated message on success, NULL on failure.
391 */
392 struct virtio_snd_msg *
virtsnd_pcm_ctl_msg_alloc(struct virtio_pcm_substream * vss,unsigned int command,gfp_t gfp)393 virtsnd_pcm_ctl_msg_alloc(struct virtio_pcm_substream *vss,
394 unsigned int command, gfp_t gfp)
395 {
396 size_t request_size = sizeof(struct virtio_snd_pcm_hdr);
397 size_t response_size = sizeof(struct virtio_snd_hdr);
398 struct virtio_snd_msg *msg;
399
400 switch (command) {
401 case VIRTIO_SND_R_PCM_SET_PARAMS:
402 request_size = sizeof(struct virtio_snd_pcm_set_params);
403 break;
404 }
405
406 msg = virtsnd_ctl_msg_alloc(request_size, response_size, gfp);
407 if (msg) {
408 struct virtio_snd_pcm_hdr *hdr = virtsnd_ctl_msg_request(msg);
409
410 hdr->hdr.code = cpu_to_le32(command);
411 hdr->stream_id = cpu_to_le32(vss->sid);
412 }
413
414 return msg;
415 }
416