xref: /qemu/hw/ide/pci.c (revision 7cda62087c0baf064486f3d803184c2c3b35c04a)
/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <hw/hw.h>
#include <hw/i386/pc.h>
#include <hw/pci/pci.h>
#include <hw/isa/isa.h>
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include <hw/ide/pci.h>

#define BMDMA_PAGE_SIZE 4096

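/* Status register bits that older QEMU versions reused to carry internal
 * retry state across migration; kept together here so the migration code
 * can detect and translate them (see ide_bmdma_status_needed() and
 * ide_bmdma_pre_save() below). */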
#define BM_MIGRATION_COMPAT_STATUS_BITS \
        (IDE_RETRY_DMA | IDE_RETRY_PIO | \
        IDE_RETRY_READ | IDE_RETRY_FLUSH)

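/* Latch the completion callback and reset the per-transfer PRD cursor;
 * if the guest has already set the Start bit (BM_STATUS_DMAING), kick off
 * the transfer immediately. */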
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

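/* Each Physical Region Descriptor (PRD) is 8 bytes in guest memory: a
 * 32-bit physical buffer address followed by a 32-bit word whose low 16
 * bits (with bit 0 masked off) give the byte count, 0 meaning 64 KiB, and
 * whose bit 31 marks the last entry of the table. The parsing below and
 * in bmdma_rw_buf() follows that layout. */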
/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Returns the number of bytes prepared, -1 on error.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for(;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }

    qemu_sglist_destroy(&s->sg);
    s->io_buffer_size = 0;
    return -1;
}

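/* Walk the PRD table and copy data between the guest buffers it describes
 * and s->io_buffer; is_write selects the direction (write to or read from
 * guest memory). Returns 1 once the whole io_buffer has been transferred,
 * 0 if the PRD table ended before the buffer was exhausted. */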
/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for(;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0)
            break;
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return 0;
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len)
            l = bm->cur_prd_len;
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

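/* Detach the current request from the engine. With more == false the
 * transfer is over and BM_STATUS_DMAING is cleared; with more == true the
 * engine stays marked as busy because the transfer is expected to
 * continue. */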
static void bmdma_set_inactive(IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}

static void bmdma_restart_dma(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}

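/* IRQ glue between the IDE bus and the PCI device: on a rising edge latch
 * BM_STATUS_INT in the status register before forwarding the level to the
 * original bus IRQ line saved in bmdma_init(). */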
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

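/* Handler for writes to the Bus Master IDE Command register. Only the
 * Start/Stop bit (bit 0) and the Read/Write Control bit (bit 3) are kept,
 * hence the final "val & 0x09". A typical guest programs the PRD table
 * address and the direction, issues the ATA command, then sets the Start
 * bit; clearing the Start bit while a transfer is in flight is treated as
 * a cancellation, handled below by orphaning buffered requests and, if
 * necessary, draining outstanding I/O. */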
void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, val);
#endif

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            /* First invoke the callbacks of all buffered requests
             * and flag those requests as orphaned. Ideally there
             * are no unbuffered (Scatter Gather DMA Requests or
             * write requests) pending and we can avoid draining. */
            IDEBufferedRequest *req;
            IDEState *s = idebus_active_if(bm->bus);
            QLIST_FOREACH(req, &s->buffered_requests, list) {
                if (!req->orphaned) {
#ifdef DEBUG_IDE
                    printf("%s: invoking cb %p of buffered request %p with"
                           " -ECANCELED\n", __func__, req->original_cb, req);
#endif
                    req->original_cb(req->original_opaque, -ECANCELED);
                }
                req->orphaned = true;
            }
            /*
             * We can't cancel Scatter Gather DMA in the middle of the
             * operation or a partial (not full) DMA transfer would reach
             * the storage, so we wait for completion instead (we behave
             * as if the DMA had already completed by the time the guest
             * tried to cancel it by calling bmdma_cmd_writeb with
             * BM_CMD_START not set).
             *
             * In the future we'll be able to safely cancel the I/O once
             * the whole DMA operation is submitted to disk with a single
             * aio operation with preadv/pwritev.
             */
            if (bm->bus->dma->aiocb) {
#ifdef DEBUG_IDE
                printf("%s: draining all remaining requests\n", __func__);
#endif
                blk_drain_all();
                assert(bm->bus->dma->aiocb == NULL);
            }
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb)
                    bm->dma_cb(bmdma_active_if(bm), 0);
            }
        }
    }

    bm->cmd = val & 0x09;
}

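/* The Bus Master IDE Descriptor Table Pointer register. The guest may
 * access it with 1-, 2- or 4-byte I/O, so reads and writes shift and mask
 * by the byte offset within the register. The two lowest address bits are
 * forced to zero, keeping the PRD table dword-aligned. */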
static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

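/* Migration helpers. The in-flight PRD cursor and the raw status register
 * travel in optional subsections so that streams exchanged with older QEMU
 * versions remain compatible; each *_needed() predicate below decides
 * whether its subsection has to be sent. */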
static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

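/* On save, fold the bus retry state into the legacy migration fields and
 * re-create the overloaded status byte that older versions expect. */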
static void ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save  = ide_bmdma_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for(i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions
           stored bigger values. We only need the last bit */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

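/* Attach up to four drives from hd_table to the controller: entries 0/1 go
 * to the primary channel, 2/3 to the secondary, alternating master/slave. */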
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = PCI_IDE(dev);
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL)
            continue;
        ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};

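/* Wire a BMDMA engine into an IDE bus: install the DMA ops, interpose
 * bmdma_irq() between the bus and its original IRQ line, and remember the
 * owning PCI device so PRD fetches and data copies can use PCI DMA. */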
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->pci_dev = d;
}

static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .abstract = true,
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)