xref: /qemu/hw/dma/sifive_pdma.c (revision 06b40d250ecfa1633209c2e431a7a38acfd03a98)
/*
 * SiFive Platform DMA emulation
 *
 * Copyright (c) 2020 Wind River Systems, Inc.
 *
 * Author:
 *   Bin Meng <bin.meng@windriver.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "system/dma.h"
#include "hw/dma/sifive_pdma.h"

#define DMA_CONTROL         0x000
#define   CONTROL_CLAIM     BIT(0)
#define   CONTROL_RUN       BIT(1)
#define   CONTROL_DONE_IE   BIT(14)
#define   CONTROL_ERR_IE    BIT(15)
#define   CONTROL_DONE      BIT(30)
#define   CONTROL_ERR       BIT(31)

#define DMA_NEXT_CONFIG     0x004
#define   CONFIG_REPEAT     BIT(2)
#define   CONFIG_ORDER      BIT(3)
#define   CONFIG_WRSZ_SHIFT 24
#define   CONFIG_RDSZ_SHIFT 28
#define   CONFIG_SZ_MASK    0xf

#define DMA_NEXT_BYTES      0x008
#define DMA_NEXT_DST        0x010
#define DMA_NEXT_SRC        0x018
#define DMA_EXEC_CONFIG     0x104
#define DMA_EXEC_BYTES      0x108
#define DMA_EXEC_DST        0x110
#define DMA_EXEC_SRC        0x118

/*
 * The FU540/FU740 manuals give incorrect reset values for
 * NextConfig.wsize/rsize: on real Unleashed/Unmatched boards they
 * reset to 6, not 0.
 */
#define CONFIG_WRSZ_DEFAULT 6
#define CONFIG_RDSZ_DEFAULT 6
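
/*
 * Illustrative per-channel programming sequence (a sketch of the
 * register-level protocol as implemented by this model, not quoted
 * from the SiFive documentation):
 *
 *   1. write CONTROL_CLAIM to DMA_CONTROL (this resets the Next* registers)
 *   2. program DMA_NEXT_CONFIG (wsize/rsize, optionally CONFIG_REPEAT)
 *   3. program DMA_NEXT_BYTES, DMA_NEXT_DST and DMA_NEXT_SRC
 *   4. write CONTROL_CLAIM | CONTROL_RUN (plus CONTROL_DONE_IE and/or
 *      CONTROL_ERR_IE as desired) to DMA_CONTROL to start the transfer
 *
 * On completion the model sets CONTROL_DONE (or CONTROL_ERR on failure)
 * and raises irq[ch * 2] (done) or irq[ch * 2 + 1] (error) when the
 * matching interrupt-enable bit is set.
 */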

enum dma_chan_state {
    DMA_CHAN_STATE_IDLE,
    DMA_CHAN_STATE_STARTED,
    DMA_CHAN_STATE_ERROR,
    DMA_CHAN_STATE_DONE
};
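
/*
 * The states above are used as follows: sifive_pdma_run() moves a channel
 * from IDLE to STARTED, then to DONE (or to ERROR if the configuration is
 * rejected); sifive_pdma_update_irq() propagates the result to the
 * interrupt lines and returns the channel to IDLE.
 */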

static void sifive_pdma_run(SiFivePDMAState *s, int ch)
{
    uint64_t bytes = s->chan[ch].next_bytes;
    uint64_t dst = s->chan[ch].next_dst;
    uint64_t src = s->chan[ch].next_src;
    uint32_t config = s->chan[ch].next_config;
    int wsize, rsize, size, remainder;
    uint8_t buf[64];
    int n;

    /* do nothing if the number of bytes to transfer is zero */
    if (!bytes) {
        goto done;
    }

    /*
     * The manual does not describe how the hardware behaves when
     * config.wsize and config.rsize are given different values.
     * A common case is memory-to-memory DMA, where they are normally
     * the same. Abort if this expectation fails.
     */
    wsize = (config >> CONFIG_WRSZ_SHIFT) & CONFIG_SZ_MASK;
    rsize = (config >> CONFIG_RDSZ_SHIFT) & CONFIG_SZ_MASK;
    if (wsize != rsize) {
        goto error;
    }

    /*
     * Calculate the transaction size.
     *
     * The size field is the base-2 logarithm of the DMA transaction size,
     * capped at 64 bytes per transaction.
     */
    size = wsize;
    if (size > 6) {
        size = 6;
    }
    size = 1 << size;
    remainder = bytes % size;
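
    /*
     * Worked example: with the reset defaults wsize = rsize = 6, each
     * transaction moves 1 << 6 = 64 bytes; a 200-byte transfer is then
     * copied as three 64-byte chunks below plus an 8-byte remainder
     * (200 % 64 == 8).
     */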

    /* indicate a DMA transfer is started */
    s->chan[ch].state = DMA_CHAN_STATE_STARTED;
    s->chan[ch].control &= ~CONTROL_DONE;
    s->chan[ch].control &= ~CONTROL_ERR;

    /* load the next_ registers into their exec_ counterparts */
    s->chan[ch].exec_config = config;
    s->chan[ch].exec_bytes = bytes;
    s->chan[ch].exec_dst = dst;
    s->chan[ch].exec_src = src;

    for (n = 0; n < bytes / size; n++) {
        cpu_physical_memory_read(s->chan[ch].exec_src, buf, size);
        cpu_physical_memory_write(s->chan[ch].exec_dst, buf, size);
        s->chan[ch].exec_src += size;
        s->chan[ch].exec_dst += size;
        s->chan[ch].exec_bytes -= size;
    }

    if (remainder) {
        cpu_physical_memory_read(s->chan[ch].exec_src, buf, remainder);
        cpu_physical_memory_write(s->chan[ch].exec_dst, buf, remainder);
        s->chan[ch].exec_src += remainder;
        s->chan[ch].exec_dst += remainder;
        s->chan[ch].exec_bytes -= remainder;
    }

    /* reload exec_ registers if repeat is required */
    if (s->chan[ch].next_config & CONFIG_REPEAT) {
        s->chan[ch].exec_bytes = bytes;
        s->chan[ch].exec_dst = dst;
        s->chan[ch].exec_src = src;
    }

done:
    /* indicate a DMA transfer is done */
    s->chan[ch].state = DMA_CHAN_STATE_DONE;
    s->chan[ch].control &= ~CONTROL_RUN;
    s->chan[ch].control |= CONTROL_DONE;
    return;

error:
    s->chan[ch].state = DMA_CHAN_STATE_ERROR;
    s->chan[ch].control |= CONTROL_ERR;
}

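/*
 * Each channel drives two interrupt lines: irq[ch * 2] signals completion
 * (CONTROL_DONE gated by CONTROL_DONE_IE) and irq[ch * 2 + 1] signals an
 * error (CONTROL_ERR gated by CONTROL_ERR_IE).
 */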
static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch)
{
    bool done_ie, err_ie;

    done_ie = !!(s->chan[ch].control & CONTROL_DONE_IE);
    err_ie = !!(s->chan[ch].control & CONTROL_ERR_IE);

    if (done_ie && (s->chan[ch].control & CONTROL_DONE)) {
        qemu_irq_raise(s->irq[ch * 2]);
    } else {
        qemu_irq_lower(s->irq[ch * 2]);
    }

    if (err_ie && (s->chan[ch].control & CONTROL_ERR)) {
        qemu_irq_raise(s->irq[ch * 2 + 1]);
    } else {
        qemu_irq_lower(s->irq[ch * 2 + 1]);
    }

    s->chan[ch].state = DMA_CHAN_STATE_IDLE;
}

static uint64_t sifive_pdma_readq(SiFivePDMAState *s, int ch, hwaddr offset)
{
    uint64_t val = 0;

    offset &= 0xfff;
    switch (offset) {
    case DMA_NEXT_BYTES:
        val = s->chan[ch].next_bytes;
        break;
    case DMA_NEXT_DST:
        val = s->chan[ch].next_dst;
        break;
    case DMA_NEXT_SRC:
        val = s->chan[ch].next_src;
        break;
    case DMA_EXEC_BYTES:
        val = s->chan[ch].exec_bytes;
        break;
    case DMA_EXEC_DST:
        val = s->chan[ch].exec_dst;
        break;
    case DMA_EXEC_SRC:
        val = s->chan[ch].exec_src;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }

    return val;
}

static uint32_t sifive_pdma_readl(SiFivePDMAState *s, int ch, hwaddr offset)
{
    uint32_t val = 0;

    offset &= 0xfff;
    switch (offset) {
    case DMA_CONTROL:
        val = s->chan[ch].control;
        break;
    case DMA_NEXT_CONFIG:
        val = s->chan[ch].next_config;
        break;
    case DMA_NEXT_BYTES:
        val = extract64(s->chan[ch].next_bytes, 0, 32);
        break;
    case DMA_NEXT_BYTES + 4:
        val = extract64(s->chan[ch].next_bytes, 32, 32);
        break;
    case DMA_NEXT_DST:
        val = extract64(s->chan[ch].next_dst, 0, 32);
        break;
    case DMA_NEXT_DST + 4:
        val = extract64(s->chan[ch].next_dst, 32, 32);
        break;
    case DMA_NEXT_SRC:
        val = extract64(s->chan[ch].next_src, 0, 32);
        break;
    case DMA_NEXT_SRC + 4:
        val = extract64(s->chan[ch].next_src, 32, 32);
        break;
    case DMA_EXEC_CONFIG:
        val = s->chan[ch].exec_config;
        break;
    case DMA_EXEC_BYTES:
        val = extract64(s->chan[ch].exec_bytes, 0, 32);
        break;
    case DMA_EXEC_BYTES + 4:
        val = extract64(s->chan[ch].exec_bytes, 32, 32);
        break;
    case DMA_EXEC_DST:
        val = extract64(s->chan[ch].exec_dst, 0, 32);
        break;
    case DMA_EXEC_DST + 4:
        val = extract64(s->chan[ch].exec_dst, 32, 32);
        break;
    case DMA_EXEC_SRC:
        val = extract64(s->chan[ch].exec_src, 0, 32);
        break;
    case DMA_EXEC_SRC + 4:
        val = extract64(s->chan[ch].exec_src, 32, 32);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }

    return val;
}

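/*
 * MMIO dispatch: judging from the decode below, each channel occupies its
 * own 4 KiB window, so SIFIVE_PDMA_CHAN_NO() (from hw/dma/sifive_pdma.h)
 * selects the channel from the upper offset bits while the low 12 bits
 * select the register within that channel.
 */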
static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
{
    SiFivePDMAState *s = opaque;
    int ch = SIFIVE_PDMA_CHAN_NO(offset);
    uint64_t val = 0;

    if (ch >= SIFIVE_PDMA_CHANS) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
                      __func__, ch);
        return 0;
    }

    switch (size) {
    case 8:
        val = sifive_pdma_readq(s, ch, offset);
        break;
    case 4:
        val = sifive_pdma_readl(s, ch, offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid read size %u to PDMA\n",
                      __func__, size);
        return 0;
    }

    return val;
}

static void sifive_pdma_writeq(SiFivePDMAState *s, int ch,
                               hwaddr offset, uint64_t value)
{
    offset &= 0xfff;
    switch (offset) {
    case DMA_NEXT_BYTES:
        s->chan[ch].next_bytes = value;
        break;
    case DMA_NEXT_DST:
        s->chan[ch].next_dst = value;
        break;
    case DMA_NEXT_SRC:
        s->chan[ch].next_src = value;
        break;
    case DMA_EXEC_BYTES:
    case DMA_EXEC_DST:
    case DMA_EXEC_SRC:
        /* these are read-only registers */
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }
}

static void sifive_pdma_writel(SiFivePDMAState *s, int ch,
                               hwaddr offset, uint32_t value)
{
    bool claimed, run;

    offset &= 0xfff;
    switch (offset) {
    case DMA_CONTROL:
        claimed = !!(s->chan[ch].control & CONTROL_CLAIM);
        run = !!(s->chan[ch].control & CONTROL_RUN);

        if (!claimed && (value & CONTROL_CLAIM)) {
            /* reset the Next* registers */
            s->chan[ch].next_config = (CONFIG_RDSZ_DEFAULT << CONFIG_RDSZ_SHIFT) |
                                      (CONFIG_WRSZ_DEFAULT << CONFIG_WRSZ_SHIFT);
            s->chan[ch].next_bytes = 0;
            s->chan[ch].next_dst = 0;
            s->chan[ch].next_src = 0;
        }

        /* the claim bit can only be cleared when run is low */
        if (run && !(value & CONTROL_CLAIM)) {
            value |= CONTROL_CLAIM;
        }

        s->chan[ch].control = value;

        /*
         * If the channel was not claimed before the run bit is set,
         * or if the channel is disclaimed while run was low,
         * the DMA won't run.
         */
        if (!claimed || (!run && !(value & CONTROL_CLAIM))) {
            s->chan[ch].control &= ~CONTROL_RUN;
            return;
        }

        if (value & CONTROL_RUN) {
            sifive_pdma_run(s, ch);
        }

        sifive_pdma_update_irq(s, ch);
        break;
    case DMA_NEXT_CONFIG:
        s->chan[ch].next_config = value;
        break;
    case DMA_NEXT_BYTES:
        s->chan[ch].next_bytes =
            deposit64(s->chan[ch].next_bytes, 0, 32, value);
        break;
    case DMA_NEXT_BYTES + 4:
        s->chan[ch].next_bytes =
            deposit64(s->chan[ch].next_bytes, 32, 32, value);
        break;
    case DMA_NEXT_DST:
        s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 0, 32, value);
        break;
    case DMA_NEXT_DST + 4:
        s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 32, 32, value);
        break;
    case DMA_NEXT_SRC:
        s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 0, 32, value);
        break;
    case DMA_NEXT_SRC + 4:
        s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 32, 32, value);
        break;
    case DMA_EXEC_CONFIG:
    case DMA_EXEC_BYTES:
    case DMA_EXEC_BYTES + 4:
    case DMA_EXEC_DST:
    case DMA_EXEC_DST + 4:
    case DMA_EXEC_SRC:
    case DMA_EXEC_SRC + 4:
        /* these are read-only registers */
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }
}

static void sifive_pdma_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    SiFivePDMAState *s = opaque;
    int ch = SIFIVE_PDMA_CHAN_NO(offset);

    if (ch >= SIFIVE_PDMA_CHANS) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
                      __func__, ch);
        return;
    }

    switch (size) {
    case 8:
        sifive_pdma_writeq(s, ch, offset, value);
        break;
    case 4:
        sifive_pdma_writel(s, ch, offset, (uint32_t) value);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid write size %u to PDMA\n",
                      __func__, size);
        break;
    }
}

static const MemoryRegionOps sifive_pdma_ops = {
    .read = sifive_pdma_read,
    .write = sifive_pdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    /* there are 32-bit and 64-bit wide registers */
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    }
};

static void sifive_pdma_realize(DeviceState *dev, Error **errp)
{
    SiFivePDMAState *s = SIFIVE_PDMA(dev);
    int i;

    memory_region_init_io(&s->iomem, OBJECT(dev), &sifive_pdma_ops, s,
                          TYPE_SIFIVE_PDMA, SIFIVE_PDMA_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);

    for (i = 0; i < SIFIVE_PDMA_IRQS; i++) {
        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
    }
}

static void sifive_pdma_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "SiFive Platform DMA controller";
    dc->realize = sifive_pdma_realize;
}

static const TypeInfo sifive_pdma_info = {
    .name          = TYPE_SIFIVE_PDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SiFivePDMAState),
    .class_init    = sifive_pdma_class_init,
};

static void sifive_pdma_register_types(void)
{
    type_register_static(&sifive_pdma_info);
}

type_init(sifive_pdma_register_types)