xref: /qemu/hw/ide/core.c (revision 0462a32b4f63b2448b4a196381138afd50719dc4)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/irq.h"
28 #include "hw/isa/isa.h"
29 #include "migration/vmstate.h"
30 #include "qemu/error-report.h"
31 #include "qemu/main-loop.h"
32 #include "qemu/timer.h"
33 #include "qemu/hw-version.h"
34 #include "qemu/memalign.h"
35 #include "system/system.h"
36 #include "system/blockdev.h"
37 #include "system/dma.h"
38 #include "hw/block/block.h"
39 #include "system/block-backend.h"
40 #include "qapi/error.h"
41 #include "qemu/cutils.h"
42 #include "system/replay.h"
43 #include "system/runstate.h"
44 #include "ide-internal.h"
45 #include "trace.h"
46 
47 /* These values were based on a Seagate ST3500418AS but have been modified
48    to make more sense in QEMU */
49 static const int smart_attributes[][12] = {
50     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
51     /* raw read error rate*/
52     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
53     /* spin up */
54     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
55     /* start stop count */
56     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
57     /* remapped sectors */
58     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
59     /* power on hours */
60     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
61     /* power cycle count */
62     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
63     /* airflow-temperature-celsius */
64     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
65 };
66 
67 const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
68     [IDE_DMA_READ] = "DMA READ",
69     [IDE_DMA_WRITE] = "DMA WRITE",
70     [IDE_DMA_TRIM] = "DMA TRIM",
71     [IDE_DMA_ATAPI] = "DMA ATAPI"
72 };
73 
74 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
75 {
76     if ((unsigned)enval < IDE_DMA__COUNT) {
77         return IDE_DMA_CMD_lookup[enval];
78     }
79     return "DMA UNKNOWN CMD";
80 }
81 
82 static void ide_dummy_transfer_stop(IDEState *s);
83 
84 const MemoryRegionPortio ide_portio_list[] = {
85     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
86     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
87     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
88     PORTIO_END_OF_LIST(),
89 };
90 
91 const MemoryRegionPortio ide_portio2_list[] = {
92     { 0, 1, 1, .read = ide_status_read, .write = ide_ctrl_write },
93     PORTIO_END_OF_LIST(),
94 };
95 
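/*
 * Fill a fixed-width ATA IDENTIFY string field: pad with spaces and
 * swap the two characters within each 16-bit word (the str[i ^ 1]
 * index), since IDENTIFY strings keep the first character of each
 * pair in the high byte of the little-endian word.
 */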
96 static void padstr(char *str, const char *src, int len)
97 {
98     int i, v;
99     for(i = 0; i < len; i++) {
100         if (*src)
101             v = *src++;
102         else
103             v = ' ';
104         str[i^1] = v;
105     }
106 }
107 
108 static void put_le16(uint16_t *p, unsigned int v)
109 {
110     *p = cpu_to_le16(v);
111 }
112 
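/*
 * Refresh the capacity words of an already built IDENTIFY block:
 * words 60-61 hold the LBA28-addressable sector count (capped at
 * 0x0FFFFFFF) and words 100-103 hold the full LBA48 sector count.
 */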
113 static void ide_identify_size(IDEState *s)
114 {
115     uint16_t *p = (uint16_t *)s->identify_data;
116     int64_t nb_sectors_lba28 = s->nb_sectors;
117     if (nb_sectors_lba28 >= 1 << 28) {
118         nb_sectors_lba28 = (1 << 28) - 1;
119     }
120     put_le16(p + 60, nb_sectors_lba28);
121     put_le16(p + 61, nb_sectors_lba28 >> 16);
122     put_le16(p + 100, s->nb_sectors);
123     put_le16(p + 101, s->nb_sectors >> 16);
124     put_le16(p + 102, s->nb_sectors >> 32);
125     put_le16(p + 103, s->nb_sectors >> 48);
126 }
127 
128 static void ide_identify(IDEState *s)
129 {
130     uint16_t *p;
131     unsigned int oldsize;
132     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
133 
134     p = (uint16_t *)s->identify_data;
135     if (s->identify_set) {
136         goto fill_buffer;
137     }
138     memset(p, 0, sizeof(s->identify_data));
139 
140     put_le16(p + 0, 0x0040);
141     put_le16(p + 1, s->cylinders);
142     put_le16(p + 3, s->heads);
143     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
144     put_le16(p + 5, 512); /* XXX: retired, remove ? */
145     put_le16(p + 6, s->sectors);
146     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
147     put_le16(p + 20, 3); /* XXX: retired, remove ? */
148     put_le16(p + 21, 512); /* cache size in sectors */
149     put_le16(p + 22, 4); /* ecc bytes */
150     padstr((char *)(p + 23), s->version, 8); /* firmware version */
151     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
152 #if MAX_MULT_SECTORS > 1
153     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
154 #endif
155     put_le16(p + 48, 1); /* dword I/O */
156     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
157     put_le16(p + 51, 0x200); /* PIO transfer cycle */
158     put_le16(p + 52, 0x200); /* DMA transfer cycle */
159     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
160     put_le16(p + 54, s->cylinders);
161     put_le16(p + 55, s->heads);
162     put_le16(p + 56, s->sectors);
163     oldsize = s->cylinders * s->heads * s->sectors;
164     put_le16(p + 57, oldsize);
165     put_le16(p + 58, oldsize >> 16);
166     if (s->mult_sectors)
167         put_le16(p + 59, 0x100 | s->mult_sectors);
168     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
169     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
170     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
171     put_le16(p + 63, 0x07); /* mdma0-2 supported */
172     put_le16(p + 64, 0x03); /* pio3-4 supported */
173     put_le16(p + 65, 120);
174     put_le16(p + 66, 120);
175     put_le16(p + 67, 120);
176     put_le16(p + 68, 120);
177     if (dev && dev->conf.discard_granularity) {
178         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
179     }
180 
181     if (s->ncq_queues) {
182         put_le16(p + 75, s->ncq_queues - 1);
183         /* NCQ supported */
184         put_le16(p + 76, (1 << 8));
185     }
186 
187     put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
188     put_le16(p + 81, 0x16); /* conforms to ata5 */
189     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
190     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
191     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
192     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
193     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
194     if (s->wwn) {
195         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
196     } else {
197         put_le16(p + 84, (1 << 14) | 0);
198     }
199     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
200     if (blk_enable_write_cache(s->blk)) {
201         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
202     } else {
203         put_le16(p + 85, (1 << 14) | 1);
204     }
205     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
206     put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
207     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
208     if (s->wwn) {
209         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
210     } else {
211         put_le16(p + 87, (1 << 14) | 0);
212     }
213     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
214     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
215     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
216     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
217     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
218     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
219 
220     if (dev && dev->conf.physical_block_size)
221         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
222     if (s->wwn) {
223         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
224         put_le16(p + 108, s->wwn >> 48);
225         put_le16(p + 109, s->wwn >> 32);
226         put_le16(p + 110, s->wwn >> 16);
227         put_le16(p + 111, s->wwn);
228     }
229     if (dev && dev->conf.discard_granularity) {
230         put_le16(p + 169, 1); /* TRIM support */
231     }
232     if (dev) {
233         put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
234     }
235 
236     ide_identify_size(s);
237     s->identify_set = 1;
238 
239 fill_buffer:
240     memcpy(s->io_buffer, p, sizeof(s->identify_data));
241 }
242 
243 static void ide_atapi_identify(IDEState *s)
244 {
245     uint16_t *p;
246 
247     p = (uint16_t *)s->identify_data;
248     if (s->identify_set) {
249         goto fill_buffer;
250     }
251     memset(p, 0, sizeof(s->identify_data));
252 
253     /* Removable CDROM, 50us response, 12 byte packets */
254     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
255     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
256     put_le16(p + 20, 3); /* buffer type */
257     put_le16(p + 21, 512); /* cache size in sectors */
258     put_le16(p + 22, 4); /* ecc bytes */
259     padstr((char *)(p + 23), s->version, 8); /* firmware version */
260     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
261     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
262 #ifdef USE_DMA_CDROM
263     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
264     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
265     put_le16(p + 62, 7);  /* single word dma0-2 supported */
266     put_le16(p + 63, 7);  /* mdma0-2 supported */
267 #else
268     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
269     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
270     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
271 #endif
272     put_le16(p + 64, 3); /* pio3-4 supported */
273     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
274     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
275     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
276     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
277 
278     put_le16(p + 71, 30); /* in ns */
279     put_le16(p + 72, 30); /* in ns */
280 
281     if (s->ncq_queues) {
282         put_le16(p + 75, s->ncq_queues - 1);
283         /* NCQ supported */
284         put_le16(p + 76, (1 << 8));
285     }
286 
287     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
288     if (s->wwn) {
289         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
290         put_le16(p + 87, (1 << 8)); /* WWN enabled */
291     }
292 
293 #ifdef USE_DMA_CDROM
294     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
295 #endif
296 
297     if (s->wwn) {
298         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
299         put_le16(p + 108, s->wwn >> 48);
300         put_le16(p + 109, s->wwn >> 32);
301         put_le16(p + 110, s->wwn >> 16);
302         put_le16(p + 111, s->wwn);
303     }
304 
305     s->identify_set = 1;
306 
307 fill_buffer:
308     memcpy(s->io_buffer, p, sizeof(s->identify_data));
309 }
310 
311 static void ide_cfata_identify_size(IDEState *s)
312 {
313     uint16_t *p = (uint16_t *)s->identify_data;
314     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
315     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
316     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
317     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
318 }
319 
320 static void ide_cfata_identify(IDEState *s)
321 {
322     uint16_t *p;
323     uint32_t cur_sec;
324 
325     p = (uint16_t *)s->identify_data;
326     if (s->identify_set) {
327         goto fill_buffer;
328     }
329     memset(p, 0, sizeof(s->identify_data));
330 
331     cur_sec = s->cylinders * s->heads * s->sectors;
332 
333     put_le16(p + 0, 0x848a);                    /* CF Storage Card signature */
334     put_le16(p + 1, s->cylinders);              /* Default cylinders */
335     put_le16(p + 3, s->heads);                  /* Default heads */
336     put_le16(p + 6, s->sectors);                /* Default sectors per track */
337     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
338     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
339     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
340     put_le16(p + 22, 0x0004);                   /* ECC bytes */
341     padstr((char *) (p + 23), s->version, 8);   /* Firmware Revision */
342     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
343 #if MAX_MULT_SECTORS > 1
344     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
345 #else
346     put_le16(p + 47, 0x0000);
347 #endif
348     put_le16(p + 49, 0x0f00);                   /* Capabilities */
349     put_le16(p + 51, 0x0002);                   /* PIO cycle timing mode */
350     put_le16(p + 52, 0x0001);                   /* DMA cycle timing mode */
351     put_le16(p + 53, 0x0003);                   /* Translation params valid */
352     put_le16(p + 54, s->cylinders);             /* Current cylinders */
353     put_le16(p + 55, s->heads);                 /* Current heads */
354     put_le16(p + 56, s->sectors);               /* Current sectors */
355     put_le16(p + 57, cur_sec);                  /* Current capacity */
356     put_le16(p + 58, cur_sec >> 16);            /* Current capacity */
357     if (s->mult_sectors)                        /* Multiple sector setting */
358         put_le16(p + 59, 0x100 | s->mult_sectors);
359     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
360     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
361     put_le16(p + 63, 0x0203);                   /* Multiword DMA capability */
362     put_le16(p + 64, 0x0001);                   /* Flow Control PIO support */
363     put_le16(p + 65, 0x0096);                   /* Min. Multiword DMA cycle */
364     put_le16(p + 66, 0x0096);                   /* Rec. Multiword DMA cycle */
365     put_le16(p + 68, 0x00b4);                   /* Min. PIO cycle time */
366     put_le16(p + 82, 0x400c);                   /* Command Set supported */
367     put_le16(p + 83, 0x7068);                   /* Command Set supported */
368     put_le16(p + 84, 0x4000);                   /* Features supported */
369     put_le16(p + 85, 0x000c);                   /* Command Set enabled */
370     put_le16(p + 86, 0x7044);                   /* Command Set enabled */
371     put_le16(p + 87, 0x4000);                   /* Features enabled */
372     put_le16(p + 91, 0x4060);                   /* Current APM level */
373     put_le16(p + 129, 0x0002);                  /* Current features option */
374     put_le16(p + 130, 0x0005);                  /* Reassigned sectors */
375     put_le16(p + 131, 0x0001);                  /* Initial power mode */
376     put_le16(p + 132, 0x0000);                  /* User signature */
377     put_le16(p + 160, 0x8100);                  /* Power requirement */
378     put_le16(p + 161, 0x8001);                  /* CF command set */
379 
380     ide_cfata_identify_size(s);
381     s->identify_set = 1;
382 
383 fill_buffer:
384     memcpy(s->io_buffer, p, sizeof(s->identify_data));
385 }
386 
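/*
 * Load the post-reset signature into the task file registers: the
 * 0x14/0xEB cylinder values identify a PACKET (ATAPI) device, 0/0 is
 * the ATA device signature, and 0xff/0xff is used here when there is
 * no backing drive at all.
 */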
387 static void ide_set_signature(IDEState *s)
388 {
389     s->select &= ~(ATA_DEV_HS); /* clear head */
390     /* put signature */
391     s->nsector = 1;
392     s->sector = 1;
393     if (s->drive_kind == IDE_CD) {
394         s->lcyl = 0x14;
395         s->hcyl = 0xeb;
396     } else if (s->blk) {
397         s->lcyl = 0;
398         s->hcyl = 0;
399     } else {
400         s->lcyl = 0xff;
401         s->hcyl = 0xff;
402     }
403 }
404 
405 static bool ide_sect_range_ok(IDEState *s,
406                               uint64_t sector, uint64_t nb_sectors)
407 {
408     uint64_t total_sectors;
409 
410     blk_get_geometry(s->blk, &total_sectors);
411     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
412         return false;
413     }
414     return true;
415 }
416 
417 typedef struct TrimAIOCB {
418     BlockAIOCB common;
419     IDEState *s;
420     QEMUBH *bh;
421     int ret;
422     QEMUIOVector *qiov;
423     BlockAIOCB *aiocb;
424     int i, j;
425 } TrimAIOCB;
426 
427 static void trim_aio_cancel(BlockAIOCB *acb)
428 {
429     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
430 
431     /* Exit the loop so ide_issue_trim_cb will not continue  */
432     iocb->j = iocb->qiov->niov - 1;
433     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
434 
435     iocb->ret = -ECANCELED;
436 
437     if (iocb->aiocb) {
438         blk_aio_cancel_async(iocb->aiocb);
439         iocb->aiocb = NULL;
440     }
441 }
442 
443 static const AIOCBInfo trim_aiocb_info = {
444     .aiocb_size         = sizeof(TrimAIOCB),
445     .cancel_async       = trim_aio_cancel,
446 };
447 
448 static void ide_trim_bh_cb(void *opaque)
449 {
450     TrimAIOCB *iocb = opaque;
451     BlockBackend *blk = iocb->s->blk;
452 
453     iocb->common.cb(iocb->common.opaque, iocb->ret);
454 
455     qemu_bh_delete(iocb->bh);
456     iocb->bh = NULL;
457     qemu_aio_unref(iocb);
458 
459     /* Paired with an increment in ide_issue_trim() */
460     blk_dec_in_flight(blk);
461 }
462 
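/*
 * Walk the guest-supplied DSM TRIM payload one range at a time:
 * iocb->j indexes the current iovec, iocb->i the current 8-byte entry
 * inside it.  Each entry packs a 48-bit starting LBA with a 16-bit
 * sector count; a count of zero marks an unused entry.
 */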
463 static void ide_issue_trim_cb(void *opaque, int ret)
464 {
465     TrimAIOCB *iocb = opaque;
466     IDEState *s = iocb->s;
467 
468     if (iocb->i >= 0) {
469         if (ret >= 0) {
470             block_acct_done(blk_get_stats(s->blk), &s->acct);
471         } else {
472             block_acct_failed(blk_get_stats(s->blk), &s->acct);
473         }
474     }
475 
476     if (ret >= 0) {
477         while (iocb->j < iocb->qiov->niov) {
478             int j = iocb->j;
479             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
480                 int i = iocb->i;
481                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
482 
483                 /* 6-byte LBA + 2-byte range per entry */
484                 uint64_t entry = le64_to_cpu(buffer[i]);
485                 uint64_t sector = entry & 0x0000ffffffffffffULL;
486                 uint16_t count = entry >> 48;
487 
488                 if (count == 0) {
489                     continue;
490                 }
491 
492                 if (!ide_sect_range_ok(s, sector, count)) {
493                     block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
494                     iocb->ret = -EINVAL;
495                     goto done;
496                 }
497 
498                 block_acct_start(blk_get_stats(s->blk), &s->acct,
499                                  count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);
500 
501                 /* Got an entry! Submit and exit.  */
502                 iocb->aiocb = blk_aio_pdiscard(s->blk,
503                                                sector << BDRV_SECTOR_BITS,
504                                                count << BDRV_SECTOR_BITS,
505                                                ide_issue_trim_cb, opaque);
506                 return;
507             }
508 
509             iocb->j++;
510             iocb->i = -1;
511         }
512     } else {
513         iocb->ret = ret;
514     }
515 
516 done:
517     iocb->aiocb = NULL;
518     if (iocb->bh) {
519         replay_bh_schedule_event(iocb->bh);
520     }
521 }
522 
523 BlockAIOCB *ide_issue_trim(
524         int64_t offset, QEMUIOVector *qiov,
525         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
526 {
527     IDEState *s = opaque;
528     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
529     TrimAIOCB *iocb;
530 
531     /* Paired with a decrement in ide_trim_bh_cb() */
532     blk_inc_in_flight(s->blk);
533 
534     iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
535     iocb->s = s;
536     iocb->bh = qemu_bh_new_guarded(ide_trim_bh_cb, iocb,
537                                    &DEVICE(dev)->mem_reentrancy_guard);
538     iocb->ret = 0;
539     iocb->qiov = qiov;
540     iocb->i = -1;
541     iocb->j = 0;
542     ide_issue_trim_cb(iocb, 0);
543     return &iocb->common;
544 }
545 
546 void ide_abort_command(IDEState *s)
547 {
548     s->status = READY_STAT | ERR_STAT;
549     s->error = ABRT_ERR;
550     ide_transfer_stop(s);
551 }
552 
553 static void ide_set_retry(IDEState *s)
554 {
555     s->bus->retry_unit = s->unit;
556     s->bus->retry_sector_num = ide_get_sector(s);
557     s->bus->retry_nsector = s->nsector;
558 }
559 
560 static void ide_clear_retry(IDEState *s)
561 {
562     s->bus->retry_unit = -1;
563     s->bus->retry_sector_num = 0;
564     s->bus->retry_nsector = 0;
565 }
566 
567 /* prepare data transfer and tell what to do after */
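/*
 * Returns true when the DMA backend provides a pio_transfer hook and
 * has therefore consumed the data immediately; in that case the caller
 * (ide_transfer_start) runs the end-transfer function itself instead
 * of waiting for the guest to complete the PIO transfer.
 */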
568 bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
569                                   EndTransferFunc *end_transfer_func)
570 {
571     s->data_ptr = buf;
572     s->data_end = buf + size;
573     ide_set_retry(s);
574     if (!(s->status & ERR_STAT)) {
575         s->status |= DRQ_STAT;
576     }
577     if (!s->bus->dma->ops->pio_transfer) {
578         s->end_transfer_func = end_transfer_func;
579         return false;
580     }
581     s->bus->dma->ops->pio_transfer(s->bus->dma);
582     return true;
583 }
584 
585 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
586                         EndTransferFunc *end_transfer_func)
587 {
588     if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
589         end_transfer_func(s);
590     }
591 }
592 
593 static void ide_cmd_done(IDEState *s)
594 {
595     if (s->bus->dma->ops->cmd_done) {
596         s->bus->dma->ops->cmd_done(s->bus->dma);
597     }
598 }
599 
600 static void ide_transfer_halt(IDEState *s)
601 {
602     s->end_transfer_func = ide_transfer_stop;
603     s->data_ptr = s->io_buffer;
604     s->data_end = s->io_buffer;
605     s->status &= ~DRQ_STAT;
606 }
607 
608 void ide_transfer_stop(IDEState *s)
609 {
610     ide_transfer_halt(s);
611     ide_cmd_done(s);
612 }
613 
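/*
 * Worked example of the LBA48 register split decoded below: LBA
 * 0x0123456789AB arrives as hob_hcyl=0x01, hob_lcyl=0x23,
 * hob_sector=0x45, hcyl=0x67, lcyl=0x89, sector=0xAB (one byte per
 * register, with the HOB copies holding the high-order bytes).
 */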
614 int64_t ide_get_sector(IDEState *s)
615 {
616     int64_t sector_num;
617     if (s->select & (ATA_DEV_LBA)) {
618         if (s->lba48) {
619             sector_num = ((int64_t)s->hob_hcyl << 40) |
620                 ((int64_t) s->hob_lcyl << 32) |
621                 ((int64_t) s->hob_sector << 24) |
622                 ((int64_t) s->hcyl << 16) |
623                 ((int64_t) s->lcyl << 8) | s->sector;
624         } else {
625             /* LBA28 */
626             sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
627                 (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
628         }
629     } else {
630         /* CHS */
631         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
632             (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
633     }
634 
635     return sector_num;
636 }
637 
638 void ide_set_sector(IDEState *s, int64_t sector_num)
639 {
640     unsigned int cyl, r;
641     if (s->select & (ATA_DEV_LBA)) {
642         if (s->lba48) {
643             s->sector = sector_num;
644             s->lcyl = sector_num >> 8;
645             s->hcyl = sector_num >> 16;
646             s->hob_sector = sector_num >> 24;
647             s->hob_lcyl = sector_num >> 32;
648             s->hob_hcyl = sector_num >> 40;
649         } else {
650             /* LBA28 */
651             s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
652                 ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
653             s->hcyl = (sector_num >> 16);
654             s->lcyl = (sector_num >> 8);
655             s->sector = (sector_num);
656         }
657     } else {
658         /* CHS */
659         cyl = sector_num / (s->heads * s->sectors);
660         r = sector_num % (s->heads * s->sectors);
661         s->hcyl = cyl >> 8;
662         s->lcyl = cyl;
663         s->select = (s->select & ~(ATA_DEV_HS)) |
664             ((r / s->sectors) & (ATA_DEV_HS));
665         s->sector = (r % s->sectors) + 1;
666     }
667 }
668 
669 static void ide_rw_error(IDEState *s) {
670     ide_abort_command(s);
671     ide_bus_set_irq(s->bus);
672 }
673 
674 static void ide_buffered_readv_cb(void *opaque, int ret)
675 {
676     IDEBufferedRequest *req = opaque;
677     if (!req->orphaned) {
678         if (!ret) {
679             assert(req->qiov.size == req->original_qiov->size);
680             qemu_iovec_from_buf(req->original_qiov, 0,
681                                 req->qiov.local_iov.iov_base,
682                                 req->original_qiov->size);
683         }
684         req->original_cb(req->original_opaque, ret);
685     }
686     QLIST_REMOVE(req, list);
687     qemu_vfree(qemu_iovec_buf(&req->qiov));
688     g_free(req);
689 }
690 
691 #define MAX_BUFFERED_REQS 16
692 
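/*
 * Read into a bounce buffer rather than directly into the caller's
 * iovec so the request can be "orphaned" by ide_cancel_dma_sync():
 * once orphaned, the completion callback only frees the bounce buffer
 * and never touches the original iovec or calls the original callback
 * again.
 */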
693 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
694                                QEMUIOVector *iov, int nb_sectors,
695                                BlockCompletionFunc *cb, void *opaque)
696 {
697     BlockAIOCB *aioreq;
698     IDEBufferedRequest *req;
699     int c = 0;
700 
701     QLIST_FOREACH(req, &s->buffered_requests, list) {
702         c++;
703     }
704     if (c > MAX_BUFFERED_REQS) {
705         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
706     }
707 
708     req = g_new0(IDEBufferedRequest, 1);
709     req->original_qiov = iov;
710     req->original_cb = cb;
711     req->original_opaque = opaque;
712     qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
713                         iov->size);
714 
715     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
716                             &req->qiov, 0, ide_buffered_readv_cb, req);
717 
718     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
719     return aioreq;
720 }
721 
722 /**
723  * Cancel all pending DMA requests.
724  * Any buffered DMA requests are instantly canceled,
725  * but any pending unbuffered DMA requests must be waited on.
726  */
727 void ide_cancel_dma_sync(IDEState *s)
728 {
729     IDEBufferedRequest *req;
730 
731     /* First invoke the callbacks of all buffered requests
732      * and flag those requests as orphaned. Ideally there
733      * are no unbuffered requests (scatter-gather DMA or
734      * write requests) pending, so we can avoid draining. */
735     QLIST_FOREACH(req, &s->buffered_requests, list) {
736         if (!req->orphaned) {
737             trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
738             req->original_cb(req->original_opaque, -ECANCELED);
739         }
740         req->orphaned = true;
741     }
742 
743     /*
744      * We can't cancel Scatter Gather DMA in the middle of the
745      * operation or a partial (not full) DMA transfer would reach
746      * the storage, so we wait for completion instead (we behave
747      * as if the DMA had completed by the time the guest tried to
748      * cancel it via bmdma_cmd_writeb with BM_CMD_START not
749      * set).
750      *
751      * In the future we'll be able to safely cancel the I/O once the
752      * whole DMA operation is submitted to disk with a single
753      * aio operation using preadv/pwritev.
754      */
755     if (s->bus->dma->aiocb) {
756         trace_ide_cancel_dma_sync_remaining();
757         blk_drain(s->blk);
758         assert(s->bus->dma->aiocb == NULL);
759     }
760 }
761 
762 static void ide_sector_read(IDEState *s);
763 
764 static void ide_sector_read_cb(void *opaque, int ret)
765 {
766     IDEState *s = opaque;
767     int n;
768 
769     s->pio_aiocb = NULL;
770     s->status &= ~BUSY_STAT;
771 
772     if (ret != 0) {
773         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
774                                 IDE_RETRY_READ)) {
775             return;
776         }
777     }
778 
779     block_acct_done(blk_get_stats(s->blk), &s->acct);
780 
781     n = s->nsector;
782     if (n > s->req_nb_sectors) {
783         n = s->req_nb_sectors;
784     }
785 
786     ide_set_sector(s, ide_get_sector(s) + n);
787     s->nsector -= n;
788     /* Allow the guest to read the io_buffer */
789     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
790     ide_bus_set_irq(s->bus);
791 }
792 
793 static void ide_sector_read(IDEState *s)
794 {
795     int64_t sector_num;
796     int n;
797 
798     s->status = READY_STAT | SEEK_STAT;
799     s->error = 0; /* not needed by IDE spec, but needed by Windows */
800     sector_num = ide_get_sector(s);
801     n = s->nsector;
802 
803     if (n == 0) {
804         ide_transfer_stop(s);
805         return;
806     }
807 
808     s->status |= BUSY_STAT;
809 
810     if (n > s->req_nb_sectors) {
811         n = s->req_nb_sectors;
812     }
813 
814     trace_ide_sector_read(sector_num, n);
815 
816     if (!ide_sect_range_ok(s, sector_num, n)) {
817         ide_rw_error(s);
818         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
819         return;
820     }
821 
822     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
823 
824     block_acct_start(blk_get_stats(s->blk), &s->acct,
825                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
826     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
827                                       ide_sector_read_cb, s);
828 }
829 
830 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
831 {
832     if (s->bus->dma->ops->commit_buf) {
833         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
834     }
835     s->io_buffer_offset += tx_bytes;
836     qemu_sglist_destroy(&s->sg);
837 }
838 
839 void ide_set_inactive(IDEState *s, bool more)
840 {
841     s->bus->dma->aiocb = NULL;
842     ide_clear_retry(s);
843     if (s->bus->dma->ops->set_inactive) {
844         s->bus->dma->ops->set_inactive(s->bus->dma, more);
845     }
846     ide_cmd_done(s);
847 }
848 
849 void ide_dma_error(IDEState *s)
850 {
851     dma_buf_commit(s, 0);
852     ide_abort_command(s);
853     ide_set_inactive(s, false);
854     ide_bus_set_irq(s->bus);
855 }
856 
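/*
 * Map a block-layer error onto the drive's configured error action.
 * Returns non-zero when the caller must stop processing (the command
 * was either failed and reported to the guest or suspended for a later
 * retry), and zero when the error is to be ignored.
 */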
857 int ide_handle_rw_error(IDEState *s, int error, int op)
858 {
859     bool is_read = (op & IDE_RETRY_READ) != 0;
860     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
861 
862     if (action == BLOCK_ERROR_ACTION_STOP) {
863         assert(s->bus->retry_unit == s->unit);
864         s->bus->error_status = op;
865     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
866         block_acct_failed(blk_get_stats(s->blk), &s->acct);
867         if (IS_IDE_RETRY_DMA(op)) {
868             ide_dma_error(s);
869         } else if (IS_IDE_RETRY_ATAPI(op)) {
870             ide_atapi_io_error(s, -error);
871         } else {
872             ide_rw_error(s);
873         }
874     }
875     blk_error_action(s->blk, action, is_read, error);
876     return action != BLOCK_ERROR_ACTION_IGNORE;
877 }
878 
879 static void ide_dma_cb(void *opaque, int ret)
880 {
881     IDEState *s = opaque;
882     int n;
883     int64_t sector_num;
884     uint64_t offset;
885     bool stay_active = false;
886     int32_t prep_size = 0;
887 
888     if (ret == -EINVAL) {
889         ide_dma_error(s);
890         return;
891     }
892 
893     if (ret < 0) {
894         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
895             s->bus->dma->aiocb = NULL;
896             dma_buf_commit(s, 0);
897             return;
898         }
899     }
900 
901     if (s->io_buffer_size > s->nsector * 512) {
902         /*
903          * The PRDs were longer than needed for this request.
904          * The Active bit must remain set after the request completes.
905          */
906         n = s->nsector;
907         stay_active = true;
908     } else {
909         n = s->io_buffer_size >> 9;
910     }
911 
912     sector_num = ide_get_sector(s);
913     if (n > 0) {
914         assert(n * 512 == s->sg.size);
915         dma_buf_commit(s, s->sg.size);
916         sector_num += n;
917         ide_set_sector(s, sector_num);
918         s->nsector -= n;
919     }
920 
921     /* end of transfer ? */
922     if (s->nsector == 0) {
923         s->status = READY_STAT | SEEK_STAT;
924         ide_bus_set_irq(s->bus);
925         goto eot;
926     }
927 
928     /* launch next transfer */
929     n = s->nsector;
930     s->io_buffer_index = 0;
931     s->io_buffer_size = n * 512;
932     prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
933     /* prepare_buf() must succeed and respect the limit */
934     assert(prep_size >= 0 && prep_size <= n * 512);
935 
936     /*
937      * Now prep_size stores the number of bytes in the sglist, and
938      * s->io_buffer_size stores the number of bytes described by the PRDs.
939      */
940 
941     if (prep_size < n * 512) {
942         /*
943          * The PRDs are too short for this request. Error condition!
944          * Reset the Active bit and don't raise the interrupt.
945          */
946         s->status = READY_STAT | SEEK_STAT;
947         dma_buf_commit(s, 0);
948         goto eot;
949     }
950 
951     trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
952 
953     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
954         !ide_sect_range_ok(s, sector_num, n)) {
955         ide_dma_error(s);
956         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
957         return;
958     }
959 
960     offset = sector_num << BDRV_SECTOR_BITS;
961     switch (s->dma_cmd) {
962     case IDE_DMA_READ:
963         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
964                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
965         break;
966     case IDE_DMA_WRITE:
967         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
968                                            BDRV_SECTOR_SIZE, ide_dma_cb, s);
969         break;
970     case IDE_DMA_TRIM:
971         s->bus->dma->aiocb = dma_blk_io(&s->sg, offset, BDRV_SECTOR_SIZE,
972                                         ide_issue_trim, s, ide_dma_cb, s,
973                                         DMA_DIRECTION_TO_DEVICE);
974         break;
975     default:
976         abort();
977     }
978     return;
979 
980 eot:
981     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
982         block_acct_done(blk_get_stats(s->blk), &s->acct);
983     }
984     ide_set_inactive(s, stay_active);
985 }
986 
987 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
988 {
989     s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
990     s->io_buffer_size = 0;
991     s->dma_cmd = dma_cmd;
992 
993     switch (dma_cmd) {
994     case IDE_DMA_READ:
995         block_acct_start(blk_get_stats(s->blk), &s->acct,
996                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
997         break;
998     case IDE_DMA_WRITE:
999         block_acct_start(blk_get_stats(s->blk), &s->acct,
1000                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1001         break;
1002     default:
1003         break;
1004     }
1005 
1006     ide_start_dma(s, ide_dma_cb);
1007 }
1008 
1009 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
1010 {
1011     s->io_buffer_index = 0;
1012     ide_set_retry(s);
1013     if (s->bus->dma->ops->start_dma) {
1014         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
1015     }
1016 }
1017 
1018 static void ide_sector_write(IDEState *s);
1019 
1020 static void ide_sector_write_timer_cb(void *opaque)
1021 {
1022     IDEState *s = opaque;
1023     ide_bus_set_irq(s->bus);
1024 }
1025 
1026 static void ide_sector_write_cb(void *opaque, int ret)
1027 {
1028     IDEState *s = opaque;
1029     int n;
1030 
1031     s->pio_aiocb = NULL;
1032     s->status &= ~BUSY_STAT;
1033 
1034     if (ret != 0) {
1035         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
1036             return;
1037         }
1038     }
1039 
1040     block_acct_done(blk_get_stats(s->blk), &s->acct);
1041 
1042     n = s->nsector;
1043     if (n > s->req_nb_sectors) {
1044         n = s->req_nb_sectors;
1045     }
1046     s->nsector -= n;
1047 
1048     ide_set_sector(s, ide_get_sector(s) + n);
1049     if (s->nsector == 0) {
1050         /* no more sectors to write */
1051         ide_transfer_stop(s);
1052     } else {
1053         int n1 = s->nsector;
1054         if (n1 > s->req_nb_sectors) {
1055             n1 = s->req_nb_sectors;
1056         }
1057         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
1058                            ide_sector_write);
1059     }
1060 
1061     if (s->win2k_install_hack && ((++s->irq_count % 16) == 0)) {
1062         /* It seems there is a bug in the Windows 2000 installer HDD
1063            IDE driver which fills the disk with empty logs when the
1064            IDE write IRQ comes too early. This hack tries to correct
1065            that at the expense of slower write performance. Use this
1066            option _only_ to install Windows 2000. You must disable it
1067            for normal use. */
1068         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1069                   (NANOSECONDS_PER_SECOND / 1000));
1070     } else {
1071         ide_bus_set_irq(s->bus);
1072     }
1073 }
1074 
1075 static void ide_sector_write(IDEState *s)
1076 {
1077     int64_t sector_num;
1078     int n;
1079 
1080     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1081     sector_num = ide_get_sector(s);
1082 
1083     n = s->nsector;
1084     if (n > s->req_nb_sectors) {
1085         n = s->req_nb_sectors;
1086     }
1087 
1088     trace_ide_sector_write(sector_num, n);
1089 
1090     if (!ide_sect_range_ok(s, sector_num, n)) {
1091         ide_rw_error(s);
1092         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1093         return;
1094     }
1095 
1096     qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);
1097 
1098     block_acct_start(blk_get_stats(s->blk), &s->acct,
1099                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1100     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1101                                    &s->qiov, 0, ide_sector_write_cb, s);
1102 }
1103 
1104 static void ide_flush_cb(void *opaque, int ret)
1105 {
1106     IDEState *s = opaque;
1107 
1108     s->pio_aiocb = NULL;
1109 
1110     if (ret < 0) {
1111         /* XXX: What sector number to set here? */
1112         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1113             return;
1114         }
1115     }
1116 
1117     if (s->blk) {
1118         block_acct_done(blk_get_stats(s->blk), &s->acct);
1119     }
1120     s->status = READY_STAT | SEEK_STAT;
1121     ide_cmd_done(s);
1122     ide_bus_set_irq(s->bus);
1123 }
1124 
1125 static void ide_flush_cache(IDEState *s)
1126 {
1127     if (s->blk == NULL) {
1128         ide_flush_cb(s, 0);
1129         return;
1130     }
1131 
1132     s->status |= BUSY_STAT;
1133     ide_set_retry(s);
1134     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1135     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1136 }
1137 
1138 static void ide_cfata_metadata_inquiry(IDEState *s)
1139 {
1140     uint16_t *p;
1141     uint32_t spd;
1142 
1143     p = (uint16_t *) s->io_buffer;
1144     memset(p, 0, 0x200);
1145     spd = ((s->mdata_size - 1) >> 9) + 1;
1146 
1147     put_le16(p + 0, 0x0001);                    /* Data format revision */
1148     put_le16(p + 1, 0x0000);                    /* Media property: silicon */
1149     put_le16(p + 2, s->media_changed);          /* Media status */
1150     put_le16(p + 3, s->mdata_size & 0xffff);    /* Capacity in bytes (low) */
1151     put_le16(p + 4, s->mdata_size >> 16);       /* Capacity in bytes (high) */
1152     put_le16(p + 5, spd & 0xffff);              /* Sectors per device (low) */
1153     put_le16(p + 6, spd >> 16);                 /* Sectors per device (high) */
1154 }
1155 
1156 static void ide_cfata_metadata_read(IDEState *s)
1157 {
1158     uint16_t *p;
1159 
1160     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1161         s->status = ERR_STAT;
1162         s->error = ABRT_ERR;
1163         return;
1164     }
1165 
1166     p = (uint16_t *) s->io_buffer;
1167     memset(p, 0, 0x200);
1168 
1169     put_le16(p + 0, s->media_changed);          /* Media status */
1170     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1171                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1172                                     s->nsector << 9), 0x200 - 2));
1173 }
1174 
1175 static void ide_cfata_metadata_write(IDEState *s)
1176 {
1177     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1178         s->status = ERR_STAT;
1179         s->error = ABRT_ERR;
1180         return;
1181     }
1182 
1183     s->media_changed = 0;
1184 
1185     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1186                     s->io_buffer + 2,
1187                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1188                                     s->nsector << 9), 0x200 - 2));
1189 }
1190 
1191 /* called when the inserted state of the media has changed */
1192 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1193 {
1194     IDEState *s = opaque;
1195     uint64_t nb_sectors;
1196 
1197     s->tray_open = !load;
1198     blk_get_geometry(s->blk, &nb_sectors);
1199     s->nb_sectors = nb_sectors;
1200 
1201     /*
1202      * First indicate to the guest that a CD has been removed.  That's
1203      * done on the next command the guest sends us.
1204      *
1205      * Then we set UNIT_ATTENTION, by which the guest will
1206      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1207      */
1208     s->cdrom_changed = 1;
1209     s->events.new_media = true;
1210     s->events.eject_request = false;
1211     ide_bus_set_irq(s->bus);
1212 }
1213 
1214 static void ide_cd_eject_request_cb(void *opaque, bool force)
1215 {
1216     IDEState *s = opaque;
1217 
1218     s->events.eject_request = true;
1219     if (force) {
1220         s->tray_locked = false;
1221     }
1222     ide_bus_set_irq(s->bus);
1223 }
1224 
1225 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1226 {
1227     s->lba48 = lba48;
1228 
1229     /* handle the 'magic' 0 nsector count conversion here. To avoid
1230      * fiddling with the rest of the read logic, we just store the
1231      * full sector count in ->nsector and ignore ->hob_nsector from now on
1232      */
1233     if (!s->lba48) {
1234         if (!s->nsector)
1235             s->nsector = 256;
1236     } else {
1237         if (!s->nsector && !s->hob_nsector)
1238             s->nsector = 65536;
1239         else {
1240             int lo = s->nsector;
1241             int hi = s->hob_nsector;
1242 
1243             s->nsector = (hi << 8) | lo;
1244         }
1245     }
1246 }
1247 
1248 static void ide_clear_hob(IDEBus *bus)
1249 {
1250     /* any write clears HOB high bit of device control register */
1251     bus->cmd &= ~(IDE_CTRL_HOB);
1252 }
1253 
1254 /* IOport [W]rite [R]egisters */
1255 enum ATA_IOPORT_WR {
1256     ATA_IOPORT_WR_DATA = 0,
1257     ATA_IOPORT_WR_FEATURES = 1,
1258     ATA_IOPORT_WR_SECTOR_COUNT = 2,
1259     ATA_IOPORT_WR_SECTOR_NUMBER = 3,
1260     ATA_IOPORT_WR_CYLINDER_LOW = 4,
1261     ATA_IOPORT_WR_CYLINDER_HIGH = 5,
1262     ATA_IOPORT_WR_DEVICE_HEAD = 6,
1263     ATA_IOPORT_WR_COMMAND = 7,
1264     ATA_IOPORT_WR_NUM_REGISTERS,
1265 };
1266 
1267 const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
1268     [ATA_IOPORT_WR_DATA] = "Data",
1269     [ATA_IOPORT_WR_FEATURES] = "Features",
1270     [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
1271     [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
1272     [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
1273     [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
1274     [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
1275     [ATA_IOPORT_WR_COMMAND] = "Command"
1276 };
1277 
1278 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1279 {
1280     IDEBus *bus = opaque;
1281     IDEState *s = ide_bus_active_if(bus);
1282     int reg_num = addr & 7;
1283 
1284     trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1285 
1286     /* ignore writes to command block while busy with previous command */
1287     if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1288         return;
1289     }
1290 
1291     /* NOTE: Device0 and Device1 both receive incoming register writes.
1292      * (They're on the same bus! They have to!) */
1293 
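    /*
     * Each write below first shifts the register's previous value into
     * its HOB ("high order byte") shadow copy; the two writes together
     * form the 16-bit fields consumed by the 48-bit (EXT) commands.
     */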
1294     switch (reg_num) {
1295     case 0:
1296         break;
1297     case ATA_IOPORT_WR_FEATURES:
1298         ide_clear_hob(bus);
1299         bus->ifs[0].hob_feature = bus->ifs[0].feature;
1300         bus->ifs[1].hob_feature = bus->ifs[1].feature;
1301         bus->ifs[0].feature = val;
1302         bus->ifs[1].feature = val;
1303         break;
1304     case ATA_IOPORT_WR_SECTOR_COUNT:
1305         ide_clear_hob(bus);
1306         bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1307         bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1308         bus->ifs[0].nsector = val;
1309         bus->ifs[1].nsector = val;
1310         break;
1311     case ATA_IOPORT_WR_SECTOR_NUMBER:
1312         ide_clear_hob(bus);
1313         bus->ifs[0].hob_sector = bus->ifs[0].sector;
1314         bus->ifs[1].hob_sector = bus->ifs[1].sector;
1315         bus->ifs[0].sector = val;
1316         bus->ifs[1].sector = val;
1317         break;
1318     case ATA_IOPORT_WR_CYLINDER_LOW:
1319         ide_clear_hob(bus);
1320         bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1321         bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1322         bus->ifs[0].lcyl = val;
1323         bus->ifs[1].lcyl = val;
1324         break;
1325     case ATA_IOPORT_WR_CYLINDER_HIGH:
1326         ide_clear_hob(bus);
1327         bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1328         bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1329         bus->ifs[0].hcyl = val;
1330         bus->ifs[1].hcyl = val;
1331         break;
1332     case ATA_IOPORT_WR_DEVICE_HEAD:
1333         ide_clear_hob(bus);
1334         bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
1335         bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
1336         /* select drive */
1337         bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
1338         break;
1339     default:
1340     case ATA_IOPORT_WR_COMMAND:
1341         ide_clear_hob(bus);
1342         qemu_irq_lower(bus->irq);
1343         ide_bus_exec_cmd(bus, val);
1344         break;
1345     }
1346 }
1347 
1348 static void ide_reset(IDEState *s)
1349 {
1350     trace_ide_reset(s);
1351 
1352     if (s->pio_aiocb) {
1353         blk_aio_cancel(s->pio_aiocb);
1354         s->pio_aiocb = NULL;
1355     }
1356 
1357     if (s->reset_reverts) {
1358         s->reset_reverts = false;
1359         s->heads         = s->drive_heads;
1360         s->sectors       = s->drive_sectors;
1361     }
1362     if (s->drive_kind == IDE_CFATA)
1363         s->mult_sectors = 0;
1364     else
1365         s->mult_sectors = MAX_MULT_SECTORS;
1366     /* ide regs */
1367     s->feature = 0;
1368     s->error = 0;
1369     s->nsector = 0;
1370     s->sector = 0;
1371     s->lcyl = 0;
1372     s->hcyl = 0;
1373 
1374     /* lba48 */
1375     s->hob_feature = 0;
1376     s->hob_sector = 0;
1377     s->hob_nsector = 0;
1378     s->hob_lcyl = 0;
1379     s->hob_hcyl = 0;
1380 
1381     s->select = (ATA_DEV_ALWAYS_ON);
1382     s->status = READY_STAT | SEEK_STAT;
1383 
1384     s->lba48 = 0;
1385 
1386     /* ATAPI specific */
1387     s->sense_key = 0;
1388     s->asc = 0;
1389     s->cdrom_changed = 0;
1390     s->packet_transfer_size = 0;
1391     s->elementary_transfer_size = 0;
1392     s->io_buffer_index = 0;
1393     s->cd_sector_size = 0;
1394     s->atapi_dma = 0;
1395     s->tray_locked = 0;
1396     s->tray_open = 0;
1397     /* ATA DMA state */
1398     s->io_buffer_size = 0;
1399     s->req_nb_sectors = 0;
1400 
1401     ide_set_signature(s);
1402     /* init the transfer handler so that 0xffff is returned on data
1403        accesses */
1404     s->end_transfer_func = ide_dummy_transfer_stop;
1405     ide_dummy_transfer_stop(s);
1406     s->media_changed = 0;
1407 }
1408 
1409 static bool cmd_nop(IDEState *s, uint8_t cmd)
1410 {
1411     return true;
1412 }
1413 
1414 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1415 {
1416     /* Halt PIO (in the DRQ phase), then DMA */
1417     ide_transfer_halt(s);
1418     ide_cancel_dma_sync(s);
1419 
1420     /* Reset any PIO commands, reset signature, etc */
1421     ide_reset(s);
1422 
1423     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1424      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1425     s->status = 0x00;
1426 
1427     /* Do not overwrite status register */
1428     return false;
1429 }
1430 
1431 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1432 {
1433     switch (s->feature) {
1434     case DSM_TRIM:
1435         if (s->blk) {
1436             ide_sector_start_dma(s, IDE_DMA_TRIM);
1437             return false;
1438         }
1439         break;
1440     }
1441 
1442     ide_abort_command(s);
1443     return true;
1444 }
1445 
1446 static bool cmd_identify(IDEState *s, uint8_t cmd)
1447 {
1448     if (s->blk && s->drive_kind != IDE_CD) {
1449         if (s->drive_kind != IDE_CFATA) {
1450             ide_identify(s);
1451         } else {
1452             ide_cfata_identify(s);
1453         }
1454         s->status = READY_STAT | SEEK_STAT;
1455         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1456         ide_bus_set_irq(s->bus);
1457         return false;
1458     } else {
1459         if (s->drive_kind == IDE_CD) {
1460             ide_set_signature(s);
1461         }
1462         ide_abort_command(s);
1463     }
1464 
1465     return true;
1466 }
1467 
1468 static bool cmd_verify(IDEState *s, uint8_t cmd)
1469 {
1470     bool lba48 = (cmd == WIN_VERIFY_EXT);
1471 
1472     /* do sector number check ? */
1473     ide_cmd_lba48_transform(s, lba48);
1474 
1475     return true;
1476 }
1477 
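/*
 * SET MULTIPLE MODE: a block size of zero disables Read/Write Multiple;
 * any other value must be a power of two no larger than
 * MAX_MULT_SECTORS, otherwise the command is aborted.
 */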
1478 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1479 {
1480     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1481         /* Disable Read and Write Multiple */
1482         s->mult_sectors = 0;
1483     } else if ((s->nsector & 0xff) != 0 &&
1484         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1485          (s->nsector & (s->nsector - 1)) != 0)) {
1486         ide_abort_command(s);
1487     } else {
1488         s->mult_sectors = s->nsector & 0xff;
1489     }
1490 
1491     return true;
1492 }
1493 
1494 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1495 {
1496     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1497 
1498     if (!s->blk || !s->mult_sectors) {
1499         ide_abort_command(s);
1500         return true;
1501     }
1502 
1503     ide_cmd_lba48_transform(s, lba48);
1504     s->req_nb_sectors = s->mult_sectors;
1505     ide_sector_read(s);
1506     return false;
1507 }
1508 
1509 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1510 {
1511     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1512     int n;
1513 
1514     if (!s->blk || !s->mult_sectors) {
1515         ide_abort_command(s);
1516         return true;
1517     }
1518 
1519     ide_cmd_lba48_transform(s, lba48);
1520 
1521     s->req_nb_sectors = s->mult_sectors;
1522     n = MIN(s->nsector, s->req_nb_sectors);
1523 
1524     s->status = SEEK_STAT | READY_STAT;
1525     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1526 
1527     s->media_changed = 1;
1528 
1529     return false;
1530 }
1531 
1532 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1533 {
1534     bool lba48 = (cmd == WIN_READ_EXT);
1535 
1536     if (s->drive_kind == IDE_CD) {
1537         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1538         ide_abort_command(s);
1539         return true;
1540     }
1541 
1542     if (!s->blk) {
1543         ide_abort_command(s);
1544         return true;
1545     }
1546 
1547     ide_cmd_lba48_transform(s, lba48);
1548     s->req_nb_sectors = 1;
1549     ide_sector_read(s);
1550 
1551     return false;
1552 }
1553 
1554 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1555 {
1556     bool lba48 = (cmd == WIN_WRITE_EXT);
1557 
1558     if (!s->blk) {
1559         ide_abort_command(s);
1560         return true;
1561     }
1562 
1563     ide_cmd_lba48_transform(s, lba48);
1564 
1565     s->req_nb_sectors = 1;
1566     s->status = SEEK_STAT | READY_STAT;
1567     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1568 
1569     s->media_changed = 1;
1570 
1571     return false;
1572 }
1573 
1574 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1575 {
1576     bool lba48 = (cmd == WIN_READDMA_EXT);
1577 
1578     if (!s->blk) {
1579         ide_abort_command(s);
1580         return true;
1581     }
1582 
1583     ide_cmd_lba48_transform(s, lba48);
1584     ide_sector_start_dma(s, IDE_DMA_READ);
1585 
1586     return false;
1587 }
1588 
1589 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1590 {
1591     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1592 
1593     if (!s->blk) {
1594         ide_abort_command(s);
1595         return true;
1596     }
1597 
1598     ide_cmd_lba48_transform(s, lba48);
1599     ide_sector_start_dma(s, IDE_DMA_WRITE);
1600 
1601     s->media_changed = 1;
1602 
1603     return false;
1604 }
1605 
1606 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1607 {
1608     ide_flush_cache(s);
1609     return false;
1610 }
1611 
1612 static bool cmd_seek(IDEState *s, uint8_t cmd)
1613 {
1614     /* XXX: Check that seek is within bounds */
1615     return true;
1616 }
1617 
1618 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1619 {
1620     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1621 
1622     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1623     if (s->nb_sectors == 0) {
1624         ide_abort_command(s);
1625     } else {
1626         /*
1627          * Save the active drive parameters, which may have been
1628          * limited from their native counterparts by, e.g., INITIALIZE
1629          * DEVICE PARAMETERS or SET MAX ADDRESS.
1630          */
1631         const int aheads = s->heads;
1632         const int asectors = s->sectors;
1633 
1634         s->heads = s->drive_heads;
1635         s->sectors = s->drive_sectors;
1636 
1637         ide_cmd_lba48_transform(s, lba48);
1638         ide_set_sector(s, s->nb_sectors - 1);
1639 
1640         s->heads = aheads;
1641         s->sectors = asectors;
1642     }
1643 
1644     return true;
1645 }
1646 
1647 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1648 {
1649     s->nsector = 0xff; /* device active or idle */
1650     return true;
1651 }
1652 
1653 /* INITIALIZE DEVICE PARAMETERS */
1654 static bool cmd_specify(IDEState *s, uint8_t cmd)
1655 {
1656     if (s->blk && s->drive_kind != IDE_CD) {
1657         s->heads = (s->select & (ATA_DEV_HS)) + 1;
1658         s->sectors = s->nsector;
1659         ide_bus_set_irq(s->bus);
1660     } else {
1661         ide_abort_command(s);
1662     }
1663 
1664     return true;
1665 }
1666 
1667 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1668 {
1669     uint16_t *identify_data;
1670 
1671     if (!s->blk) {
1672         ide_abort_command(s);
1673         return true;
1674     }
1675 
1676     /* XXX: valid for CDROM ? */
1677     switch (s->feature) {
1678     case 0x01: /* 8-bit I/O enable (CompactFlash) */
1679     case 0x81: /* 8-bit I/O disable (CompactFlash) */
1680         if (s->drive_kind != IDE_CFATA) {
1681             goto abort_cmd;
1682         }
1683         s->io8 = !(s->feature & 0x80);
1684         return true;
1685     case 0x02: /* write cache enable */
1686         blk_set_enable_write_cache(s->blk, true);
1687         identify_data = (uint16_t *)s->identify_data;
1688         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1689         return true;
1690     case 0x82: /* write cache disable */
1691         blk_set_enable_write_cache(s->blk, false);
1692         identify_data = (uint16_t *)s->identify_data;
1693         put_le16(identify_data + 85, (1 << 14) | 1);
1694         ide_flush_cache(s);
1695         return false;
1696     case 0xcc: /* reverting to power-on defaults enable */
1697         s->reset_reverts = true;
1698         return true;
1699     case 0x66: /* reverting to power-on defaults disable */
1700         s->reset_reverts = false;
1701         return true;
1702     case 0xaa: /* read look-ahead enable */
1703     case 0x55: /* read look-ahead disable */
1704     case 0x05: /* set advanced power management mode */
1705     case 0x85: /* disable advanced power management mode */
1706     case 0x69: /* NOP */
1707     case 0x67: /* NOP */
1708     case 0x96: /* NOP */
1709     case 0x9a: /* NOP */
1710     case 0x42: /* enable Automatic Acoustic Mode */
1711     case 0xc2: /* disable Automatic Acoustic Mode */
1712         return true;
1713     case 0x03: /* set transfer mode */
1714         {
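            /*
             * Mirror the selected mode into the cached IDENTIFY data:
             * word 62 reports single-word DMA, word 63 multiword DMA and
             * word 88 Ultra DMA, with the selected mode encoded in the
             * high byte via (1 << (val + 8)).
             */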
1715             uint8_t val = s->nsector & 0x07;
1716             identify_data = (uint16_t *)s->identify_data;
1717 
1718             switch (s->nsector >> 3) {
1719             case 0x00: /* pio default */
1720             case 0x01: /* pio mode */
1721                 put_le16(identify_data + 62, 0x07);
1722                 put_le16(identify_data + 63, 0x07);
1723                 put_le16(identify_data + 88, 0x3f);
1724                 break;
1725             case 0x02: /* single word dma mode */
1726                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1727                 put_le16(identify_data + 63, 0x07);
1728                 put_le16(identify_data + 88, 0x3f);
1729                 break;
1730             case 0x04: /* mdma mode */
1731                 put_le16(identify_data + 62, 0x07);
1732                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1733                 put_le16(identify_data + 88, 0x3f);
1734                 break;
1735             case 0x08: /* udma mode */
1736                 put_le16(identify_data + 62, 0x07);
1737                 put_le16(identify_data + 63, 0x07);
1738                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1739                 break;
1740             default:
1741                 goto abort_cmd;
1742             }
1743             return true;
1744         }
1745     }
1746 
1747 abort_cmd:
1748     ide_abort_command(s);
1749     return true;
1750 }
1751 
1752 
1753 /*** ATAPI commands ***/
1754 
1755 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1756 {
1757     ide_atapi_identify(s);
1758     s->status = READY_STAT | SEEK_STAT;
1759     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1760     ide_bus_set_irq(s->bus);
1761     return false;
1762 }
1763 
1764 /* EXECUTE DEVICE DIAGNOSTIC */
1765 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1766 {
1767     /*
1768      * Clear the device register per the ATA (v6) specification,
1769      * because ide_set_signature does not clear LBA or drive bits.
1770      */
1771     s->select = (ATA_DEV_ALWAYS_ON);
1772     ide_set_signature(s);
1773 
1774     if (s->drive_kind == IDE_CD) {
1775         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1776                         * devices to return a clear status register
1777                         * with READY_STAT *not* set. */
1778         s->error = 0x01;
1779     } else {
1780         s->status = READY_STAT | SEEK_STAT;
1781         /* The bits of the error register are not as usual for this command!
1782          * They are part of the regular output (this is why ERR_STAT isn't set)
1783          * Device 0 passed, Device 1 passed or not present. */
1784         s->error = 0x01;
1785         ide_bus_set_irq(s->bus);
1786     }
1787 
1788     return false;
1789 }
1790 
1791 static bool cmd_packet(IDEState *s, uint8_t cmd)
1792 {
1793     /* overlapping commands not supported */
1794     if (s->feature & 0x02) {
1795         ide_abort_command(s);
1796         return true;
1797     }
1798 
1799     s->status = READY_STAT | SEEK_STAT;
1800     s->atapi_dma = s->feature & 1;
1801     if (s->atapi_dma) {
1802         s->dma_cmd = IDE_DMA_ATAPI;
1803     }
1804     s->nsector = 1;
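    /* The guest now writes the ATAPI_PACKET_SIZE-byte command packet through
     * the data register; ide_atapi_cmd() runs when that transfer completes
     * and starts any PIO or DMA data phase (DMA was selected by feature
     * bit 0 above). */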
1805     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1806                        ide_atapi_cmd);
1807     return false;
1808 }
1809 
1810 
1811 /*** CF-ATA commands ***/
1812 
1813 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1814 {
1815     s->error = 0x09;    /* miscellaneous error */
1816     s->status = READY_STAT | SEEK_STAT;
1817     ide_bus_set_irq(s->bus);
1818 
1819     return false;
1820 }
1821 
1822 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1823 {
1824     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1825      * required for Windows 8 to work with AHCI */
1826 
1827     if (cmd == CFA_WEAR_LEVEL) {
1828         s->nsector = 0;
1829     }
1830 
1831     if (cmd == CFA_ERASE_SECTORS) {
1832         s->media_changed = 1;
1833     }
1834 
1835     return true;
1836 }
1837 
1838 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1839 {
1840     s->status = READY_STAT | SEEK_STAT;
1841 
1842     memset(s->io_buffer, 0, 0x200);
1843     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1844     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1845     s->io_buffer[0x02] = s->select;                 /* Head */
1846     s->io_buffer[0x03] = s->sector;                 /* Sector */
1847     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1848     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1849     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1850     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1851     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1852     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1853     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1854 
1855     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1856     ide_bus_set_irq(s->bus);
1857 
1858     return false;
1859 }
1860 
1861 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1862 {
1863     switch (s->feature) {
1864     case 0x02:  /* Inquiry Metadata Storage */
1865         ide_cfata_metadata_inquiry(s);
1866         break;
1867     case 0x03:  /* Read Metadata Storage */
1868         ide_cfata_metadata_read(s);
1869         break;
1870     case 0x04:  /* Write Metadata Storage */
1871         ide_cfata_metadata_write(s);
1872         break;
1873     default:
1874         ide_abort_command(s);
1875         return true;
1876     }
1877 
1878     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1879     s->status = 0x00; /* NOTE: READY is _not_ set */
1880     ide_bus_set_irq(s->bus);
1881 
1882     return false;
1883 }
1884 
1885 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1886 {
1887     switch (s->feature) {
1888     case 0x01:  /* sense temperature in device */
1889         s->nsector = 0x50;      /* +20 C */
1890         break;
1891     default:
1892         ide_abort_command(s);
1893         return true;
1894     }
1895 
1896     return true;
1897 }
1898 
1899 
1900 /*** SMART commands ***/
1901 
1902 static bool cmd_smart(IDEState *s, uint8_t cmd)
1903 {
1904     int n;
1905 
1906     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1907         goto abort_cmd;
1908     }
1909 
1910     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1911         goto abort_cmd;
1912     }
1913 
1914     switch (s->feature) {
1915     case SMART_DISABLE:
1916         s->smart_enabled = 0;
1917         return true;
1918 
1919     case SMART_ENABLE:
1920         s->smart_enabled = 1;
1921         return true;
1922 
1923     case SMART_ATTR_AUTOSAVE:
1924         switch (s->sector) {
1925         case 0x00:
1926             s->smart_autosave = 0;
1927             break;
1928         case 0xf1:
1929             s->smart_autosave = 1;
1930             break;
1931         default:
1932             goto abort_cmd;
1933         }
1934         return true;
1935 
1936     case SMART_STATUS:
1937         if (!s->smart_errors) {
1938             s->hcyl = 0xc2;
1939             s->lcyl = 0x4f;
1940         } else {
1941             s->hcyl = 0x2c;
1942             s->lcyl = 0xf4;
1943         }
1944         return true;
1945 
1946     case SMART_READ_THRESH:
1947         memset(s->io_buffer, 0, 0x200);
1948         s->io_buffer[0] = 0x01; /* smart struct version */
1949 
1950         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
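        /* One 12-byte entry per attribute: copy the attribute ID from
         * column 0 and its threshold from column 11 of smart_attributes[]. */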
1951             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1952             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1953         }
1954 
1955         /* checksum: byte 511 = two's complement of the sum of bytes 0..510 */
1956         for (n = 0; n < 511; n++) {
1957             s->io_buffer[511] += s->io_buffer[n];
1958         }
1959         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1960 
1961         s->status = READY_STAT | SEEK_STAT;
1962         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1963         ide_bus_set_irq(s->bus);
1964         return false;
1965 
1966     case SMART_READ_DATA:
1967         memset(s->io_buffer, 0, 0x200);
1968         s->io_buffer[0] = 0x01; /* smart struct version */
1969 
1970         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1971             int i;
1972             for (i = 0; i < 11; i++) {
1973                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1974             }
1975         }
1976 
1977         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1978         if (s->smart_selftest_count == 0) {
1979             s->io_buffer[363] = 0;
1980         } else {
1981             s->io_buffer[363] =
1982                 s->smart_selftest_data[3 +
1983                            (s->smart_selftest_count - 1) *
1984                            24];
1985         }
1986         s->io_buffer[364] = 0x20;
1987         s->io_buffer[365] = 0x01;
1988         /* offline data collection capacity: execute + self-test*/
1989         /* offline data collection capacity: execute + self-test */
1990         s->io_buffer[368] = 0x03; /* smart capability (1) */
1991         s->io_buffer[369] = 0x00; /* smart capability (2) */
1992         s->io_buffer[370] = 0x01; /* error logging supported */
1993         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1994         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1995         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1996 
1997         for (n = 0; n < 511; n++) {
1998             s->io_buffer[511] += s->io_buffer[n];
1999         }
2000         s->io_buffer[511] = 0x100 - s->io_buffer[511];
2001 
2002         s->status = READY_STAT | SEEK_STAT;
2003         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
2004         ide_bus_set_irq(s->bus);
2005         return false;
2006 
2007     case SMART_READ_LOG:
2008         switch (s->sector) {
2009         case 0x01: /* summary smart error log */
2010             memset(s->io_buffer, 0, 0x200);
2011             s->io_buffer[0] = 0x01;
2012             s->io_buffer[1] = 0x00; /* no error entries */
2013             s->io_buffer[452] = s->smart_errors & 0xff;
2014             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
2015 
2016             for (n = 0; n < 511; n++) {
2017                 s->io_buffer[511] += s->io_buffer[n];
2018             }
2019             s->io_buffer[511] = 0x100 - s->io_buffer[511];
2020             break;
2021         case 0x06: /* smart self test log */
2022             memset(s->io_buffer, 0, 0x200);
2023             s->io_buffer[0] = 0x01;
2024             if (s->smart_selftest_count == 0) {
2025                 s->io_buffer[508] = 0;
2026             } else {
2027                 s->io_buffer[508] = s->smart_selftest_count;
2028                 for (n = 2; n < 506; n++)  {
2029                     s->io_buffer[n] = s->smart_selftest_data[n];
2030                 }
2031             }
2032 
2033             for (n = 0; n < 511; n++) {
2034                 s->io_buffer[511] += s->io_buffer[n];
2035             }
2036             s->io_buffer[511] = 0x100 - s->io_buffer[511];
2037             break;
2038         default:
2039             goto abort_cmd;
2040         }
2041         s->status = READY_STAT | SEEK_STAT;
2042         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
2043         ide_bus_set_irq(s->bus);
2044         return false;
2045 
2046     case SMART_EXECUTE_OFFLINE:
2047         switch (s->sector) {
2048         case 0: /* off-line routine */
2049         case 1: /* short self test */
2050         case 2: /* extended self test */
2051             s->smart_selftest_count++;
2052             if (s->smart_selftest_count > 21) {
2053                 s->smart_selftest_count = 1;
2054             }
2055             n = 2 + (s->smart_selftest_count - 1) * 24;
2056             s->smart_selftest_data[n] = s->sector;
2057             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
2058             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
2059             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
2060             break;
2061         default:
2062             goto abort_cmd;
2063         }
2064         return true;
2065     }
2066 
2067 abort_cmd:
2068     ide_abort_command(s);
2069     return true;
2070 }
2071 
2072 #define HD_OK (1u << IDE_HD)
2073 #define CD_OK (1u << IDE_CD)
2074 #define CFA_OK (1u << IDE_CFATA)
2075 #define HD_CFA_OK (HD_OK | CFA_OK)
2076 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
2077 
2078 /* Set the Disk Seek Completed status bit during completion */
2079 #define SET_DSC (1u << 8)
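/*
 * The low bits of each entry's flags form a drive-kind permission mask
 * checked by ide_cmd_permitted() below: e.g. WIN_READ is ALL_OK and is
 * accepted by any device, while WIN_READDMA is HD_CFA_OK and is aborted on
 * an ATAPI drive.  SET_DSC sits above the mask and only asks
 * ide_bus_exec_cmd() to set SEEK_STAT once the handler completes without
 * error.
 */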
2080 
2081 /* See ACS-2 T13/2015-D Table B.2 Command codes */
2082 static const struct {
2083     /* Returns true if the completion code should be run */
2084     bool (*handler)(IDEState *s, uint8_t cmd);
2085     int flags;
2086 } ide_cmd_table[0x100] = {
2087     /* NOP not implemented, mandatory for CD */
2088     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
2089     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
2090     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
2091     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
2092     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
2093     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
2094     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
2095     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
2096     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2097     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
2098     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
2099     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
2100     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
2101     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
2102     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
2103     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
2104     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
2105     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
2106     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
2107     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
2108     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
2109     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
2110     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
2111     [WIN_SPECIFY]                 = { cmd_specify, HD_CFA_OK | SET_DSC },
2112     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
2113     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
2114     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
2115     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
2116     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2117     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
2118     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
2119     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
2120     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
2121     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
2122     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
2123     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
2124     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
2125     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
2126     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
2127     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
2128     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
2129     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
2130     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
2131     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
2132     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
2133     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
2134     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
2135     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
2136     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
2137     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
2138     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
2139     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
2140     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
2141     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
2142     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2143     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2144 };
2145 
2146 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2147 {
2148     return cmd < ARRAY_SIZE(ide_cmd_table)
2149         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2150 }
2151 
2152 void ide_bus_exec_cmd(IDEBus *bus, uint32_t val)
2153 {
2154     IDEState *s;
2155     bool complete;
2156 
2157     s = ide_bus_active_if(bus);
2158     trace_ide_bus_exec_cmd(bus, s, val);
2159 
2160     /* ignore commands to a non-existent slave */
2161     if (s != bus->ifs && !s->blk) {
2162         return;
2163     }
2164 
2165     /* Only RESET is allowed while BSY and/or DRQ are set,
2166      * and only to ATAPI devices. */
2167     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2168         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2169             return;
2170         }
2171     }
2172 
2173     if (!ide_cmd_permitted(s, val)) {
2174         ide_abort_command(s);
2175         ide_bus_set_irq(s->bus);
2176         return;
2177     }
2178 
2179     s->status = READY_STAT | BUSY_STAT;
2180     s->error = 0;
2181     s->io_buffer_offset = 0;
2182 
2183     complete = ide_cmd_table[val].handler(s, val);
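    /*
     * Handlers return true when the command has fully completed here: in
     * that case BSY is dropped, SEEK_STAT is set if the table entry carries
     * SET_DSC and no error was recorded, and the IRQ is raised below.  A
     * false return means completion is deferred to a PIO transfer or DMA
     * callback, which updates the status register and raises the IRQ itself.
     */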
2184     if (complete) {
2185         s->status &= ~BUSY_STAT;
2186         assert(!!s->error == !!(s->status & ERR_STAT));
2187 
2188         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2189             s->status |= SEEK_STAT;
2190         }
2191 
2192         ide_cmd_done(s);
2193         ide_bus_set_irq(s->bus);
2194     }
2195 }
2196 
2197 /* IOport [R]ead [R]egisters */
2198 enum ATA_IOPORT_RR {
2199     ATA_IOPORT_RR_DATA = 0,
2200     ATA_IOPORT_RR_ERROR = 1,
2201     ATA_IOPORT_RR_SECTOR_COUNT = 2,
2202     ATA_IOPORT_RR_SECTOR_NUMBER = 3,
2203     ATA_IOPORT_RR_CYLINDER_LOW = 4,
2204     ATA_IOPORT_RR_CYLINDER_HIGH = 5,
2205     ATA_IOPORT_RR_DEVICE_HEAD = 6,
2206     ATA_IOPORT_RR_STATUS = 7,
2207     ATA_IOPORT_RR_NUM_REGISTERS,
2208 };
2209 
2210 const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
2211     [ATA_IOPORT_RR_DATA] = "Data",
2212     [ATA_IOPORT_RR_ERROR] = "Error",
2213     [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
2214     [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
2215     [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
2216     [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
2217     [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
2218     [ATA_IOPORT_RR_STATUS] = "Status"
2219 };
2220 
2221 uint32_t ide_ioport_read(void *opaque, uint32_t addr)
2222 {
2223     IDEBus *bus = opaque;
2224     IDEState *s = ide_bus_active_if(bus);
2225     uint32_t reg_num;
2226     int ret, hob;
2227 
2228     reg_num = addr & 7;
2229     hob = bus->cmd & (IDE_CTRL_HOB);
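    /*
     * When the HOB bit is set in the device control register, the register
     * reads below return the latched "high order byte" values (hob_feature,
     * hob_nsector, ...) written for LBA48 commands instead of the current
     * ones.
     */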
2230     switch (reg_num) {
2231     case ATA_IOPORT_RR_DATA:
2232         /*
2233          * The pre-GRUB Solaris x86 bootloader relies upon inb
2234          * consuming a word from the drive's sector buffer.
2235          */
2236         ret = ide_data_readw(bus, addr) & 0xff;
2237         break;
2238     case ATA_IOPORT_RR_ERROR:
2239         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2240             (s != bus->ifs && !s->blk)) {
2241             ret = 0;
2242         } else if (!hob) {
2243             ret = s->error;
2244         } else {
2245             ret = s->hob_feature;
2246         }
2247         break;
2248     case ATA_IOPORT_RR_SECTOR_COUNT:
2249         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2250             ret = 0;
2251         } else if (!hob) {
2252             ret = s->nsector & 0xff;
2253         } else {
2254             ret = s->hob_nsector;
2255         }
2256         break;
2257     case ATA_IOPORT_RR_SECTOR_NUMBER:
2258         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2259             ret = 0;
2260         } else if (!hob) {
2261             ret = s->sector;
2262         } else {
2263             ret = s->hob_sector;
2264         }
2265         break;
2266     case ATA_IOPORT_RR_CYLINDER_LOW:
2267         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2268             ret = 0;
2269         } else if (!hob) {
2270             ret = s->lcyl;
2271         } else {
2272             ret = s->hob_lcyl;
2273         }
2274         break;
2275     case ATA_IOPORT_RR_CYLINDER_HIGH:
2276         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2277             ret = 0;
2278         } else if (!hob) {
2279             ret = s->hcyl;
2280         } else {
2281             ret = s->hob_hcyl;
2282         }
2283         break;
2284     case ATA_IOPORT_RR_DEVICE_HEAD:
2285         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2286             ret = 0;
2287         } else {
2288             ret = s->select;
2289         }
2290         break;
2291     default:
2292     case ATA_IOPORT_RR_STATUS:
2293         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2294             (s != bus->ifs && !s->blk)) {
2295             ret = 0;
2296         } else {
2297             ret = s->status;
2298         }
2299         qemu_irq_lower(bus->irq);
2300         break;
2301     }
2302 
2303     trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
2304     return ret;
2305 }
2306 
2307 uint32_t ide_status_read(void *opaque, uint32_t addr)
2308 {
2309     IDEBus *bus = opaque;
2310     IDEState *s = ide_bus_active_if(bus);
2311     int ret;
2312 
2313     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2314         (s != bus->ifs && !s->blk)) {
2315         ret = 0;
2316     } else {
2317         ret = s->status;
2318     }
2319 
2320     trace_ide_status_read(addr, ret, bus, s);
2321     return ret;
2322 }
2323 
2324 static void ide_perform_srst(IDEState *s)
2325 {
2326     s->status |= BUSY_STAT;
2327 
2328     /* Halt PIO (Via register state); PIO BH remains scheduled. */
2329     ide_transfer_halt(s);
2330 
2331     /* Cancel DMA -- may drain block device and invoke callbacks */
2332     ide_cancel_dma_sync(s);
2333 
2334     /* Cancel PIO callback, reset registers/signature, etc */
2335     ide_reset(s);
2336 
2337     /* perform diagnostic */
2338     cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
2339 }
2340 
2341 static void ide_bus_perform_srst(void *opaque)
2342 {
2343     IDEBus *bus = opaque;
2344     IDEState *s;
2345     int i;
2346 
2347     for (i = 0; i < 2; i++) {
2348         s = &bus->ifs[i];
2349         ide_perform_srst(s);
2350     }
2351 
2352     bus->cmd &= ~IDE_CTRL_RESET;
2353 }
2354 
2355 void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
2356 {
2357     IDEBus *bus = opaque;
2358     IDEState *s;
2359     int i;
2360 
2361     trace_ide_ctrl_write(addr, val, bus);
2362 
2363     /* Device0 and Device1 each have their own control register,
2364      * but QEMU models it as just one register in the controller. */
2365     if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
2366         for (i = 0; i < 2; i++) {
2367             s = &bus->ifs[i];
2368             s->status |= BUSY_STAT;
2369         }
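        /* Only mark both drives busy here; the actual reset may have to
         * drain in-flight DMA (see ide_perform_srst() above), so it is
         * deferred to a one-shot bottom half, scheduled through the replay
         * layer so record/replay stays deterministic. */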
2370         replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2371                                          ide_bus_perform_srst, bus);
2372     }
2373 
2374     bus->cmd = val;
2375 }
2376 
2377 /*
2378  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2379  * transferred from the device to the guest), false if it's a PIO in
2380  */
2381 static bool ide_is_pio_out(IDEState *s)
2382 {
2383     if (s->end_transfer_func == ide_sector_write ||
2384         s->end_transfer_func == ide_atapi_cmd) {
2385         return false;
2386     } else if (s->end_transfer_func == ide_sector_read ||
2387                s->end_transfer_func == ide_transfer_stop ||
2388                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2389                s->end_transfer_func == ide_dummy_transfer_stop) {
2390         return true;
2391     }
2392 
2393     abort();
2394 }
2395 
2396 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2397 {
2398     IDEBus *bus = opaque;
2399     IDEState *s = ide_bus_active_if(bus);
2400     uint8_t *p;
2401 
2402     trace_ide_data_writew(addr, val, bus, s);
2403 
2404     /* PIO data access allowed only when DRQ bit is set. The result of a write
2405      * during PIO out is indeterminate, just ignore it. */
2406     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2407         return;
2408     }
2409 
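    /* In 8-bit CompactFlash mode (s->io8, toggled via SET FEATURES 0x01/0x81
     * in cmd_set_features()) the data register moves one byte per access;
     * otherwise a little-endian 16-bit word is consumed. */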
2410     p = s->data_ptr;
2411     if (s->io8) {
2412         if (p + 1 > s->data_end) {
2413             return;
2414         }
2415 
2416         *p++ = val;
2417     } else {
2418         if (p + 2 > s->data_end) {
2419             return;
2420         }
2421 
2422         *(uint16_t *)p = le16_to_cpu(val);
2423         p += 2;
2424     }
2425     s->data_ptr = p;
2426     if (p >= s->data_end) {
2427         s->status &= ~DRQ_STAT;
2428         s->end_transfer_func(s);
2429     }
2430 }
2431 
2432 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2433 {
2434     IDEBus *bus = opaque;
2435     IDEState *s = ide_bus_active_if(bus);
2436     uint8_t *p;
2437     int ret;
2438 
2439     /* PIO data access allowed only when DRQ bit is set. The result of a read
2440      * during PIO in is indeterminate, return 0 and don't move forward. */
2441     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2442         return 0;
2443     }
2444 
2445     p = s->data_ptr;
2446     if (s->io8) {
2447         if (p + 1 > s->data_end) {
2448             return 0;
2449         }
2450 
2451         ret = *p++;
2452     } else {
2453         if (p + 2 > s->data_end) {
2454             return 0;
2455         }
2456 
2457         ret = cpu_to_le16(*(uint16_t *)p);
2458         p += 2;
2459     }
2460     s->data_ptr = p;
2461     if (p >= s->data_end) {
2462         s->status &= ~DRQ_STAT;
2463         s->end_transfer_func(s);
2464     }
2465 
2466     trace_ide_data_readw(addr, ret, bus, s);
2467     return ret;
2468 }
2469 
2470 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2471 {
2472     IDEBus *bus = opaque;
2473     IDEState *s = ide_bus_active_if(bus);
2474     uint8_t *p;
2475 
2476     trace_ide_data_writel(addr, val, bus, s);
2477 
2478     /* PIO data access allowed only when DRQ bit is set. The result of a write
2479      * during PIO out is indeterminate, just ignore it. */
2480     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2481         return;
2482     }
2483 
2484     p = s->data_ptr;
2485     if (p + 4 > s->data_end) {
2486         return;
2487     }
2488 
2489     *(uint32_t *)p = le32_to_cpu(val);
2490     p += 4;
2491     s->data_ptr = p;
2492     if (p >= s->data_end) {
2493         s->status &= ~DRQ_STAT;
2494         s->end_transfer_func(s);
2495     }
2496 }
2497 
2498 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2499 {
2500     IDEBus *bus = opaque;
2501     IDEState *s = ide_bus_active_if(bus);
2502     uint8_t *p;
2503     int ret;
2504 
2505     /* PIO data access allowed only when DRQ bit is set. The result of a read
2506      * during PIO in is indeterminate, return 0 and don't move forward. */
2507     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2508         ret = 0;
2509         goto out;
2510     }
2511 
2512     p = s->data_ptr;
2513     if (p + 4 > s->data_end) {
2514         return 0;
2515     }
2516 
2517     ret = cpu_to_le32(*(uint32_t *)p);
2518     p += 4;
2519     s->data_ptr = p;
2520     if (p >= s->data_end) {
2521         s->status &= ~DRQ_STAT;
2522         s->end_transfer_func(s);
2523     }
2524 
2525 out:
2526     trace_ide_data_readl(addr, ret, bus, s);
2527     return ret;
2528 }
2529 
2530 static void ide_dummy_transfer_stop(IDEState *s)
2531 {
2532     s->data_ptr = s->io_buffer;
2533     s->data_end = s->io_buffer;
2534     s->io_buffer[0] = 0xff;
2535     s->io_buffer[1] = 0xff;
2536     s->io_buffer[2] = 0xff;
2537     s->io_buffer[3] = 0xff;
2538 }
2539 
2540 void ide_bus_reset(IDEBus *bus)
2541 {
2542     /* pending async DMA - needs the IDEState before it is reset */
2543     if (bus->dma->aiocb) {
2544         trace_ide_bus_reset_aio();
2545         blk_aio_cancel(bus->dma->aiocb);
2546         bus->dma->aiocb = NULL;
2547     }
2548 
2549     bus->unit = 0;
2550     bus->cmd = 0;
2551     ide_reset(&bus->ifs[0]);
2552     ide_reset(&bus->ifs[1]);
2553     ide_clear_hob(bus);
2554 
2555     /* reset dma provider too */
2556     if (bus->dma->ops->reset) {
2557         bus->dma->ops->reset(bus->dma);
2558     }
2559 }
2560 
2561 static bool ide_cd_is_tray_open(void *opaque)
2562 {
2563     return ((IDEState *)opaque)->tray_open;
2564 }
2565 
2566 static bool ide_cd_is_medium_locked(void *opaque)
2567 {
2568     return ((IDEState *)opaque)->tray_locked;
2569 }
2570 
2571 static void ide_resize_cb(void *opaque)
2572 {
2573     IDEState *s = opaque;
2574     uint64_t nb_sectors;
2575 
2576     if (!s->identify_set) {
2577         return;
2578     }
2579 
2580     blk_get_geometry(s->blk, &nb_sectors);
2581     s->nb_sectors = nb_sectors;
2582 
2583     /* Update the identify data buffer. */
2584     if (s->drive_kind == IDE_CFATA) {
2585         ide_cfata_identify_size(s);
2586     } else {
2587         /* IDE_CD uses a different set of callbacks entirely. */
2588         assert(s->drive_kind != IDE_CD);
2589         ide_identify_size(s);
2590     }
2591 }
2592 
2593 static const BlockDevOps ide_cd_block_ops = {
2594     .change_media_cb = ide_cd_change_cb,
2595     .eject_request_cb = ide_cd_eject_request_cb,
2596     .is_tray_open = ide_cd_is_tray_open,
2597     .is_medium_locked = ide_cd_is_medium_locked,
2598 };
2599 
2600 static const BlockDevOps ide_hd_block_ops = {
2601     .resize_cb = ide_resize_cb,
2602 };
2603 
2604 int ide_init_drive(IDEState *s, IDEDevice *dev, IDEDriveKind kind, Error **errp)
2605 {
2606     uint64_t nb_sectors;
2607 
2608     s->blk = dev->conf.blk;
2609     s->drive_kind = kind;
2610 
2611     blk_get_geometry(s->blk, &nb_sectors);
2612     s->win2k_install_hack = dev->win2k_install_hack;
2613     s->cylinders = dev->conf.cyls;
2614     s->heads = s->drive_heads = dev->conf.heads;
2615     s->sectors = s->drive_sectors = dev->conf.secs;
2616     s->chs_trans = dev->chs_trans;
2617     s->nb_sectors = nb_sectors;
2618     s->wwn = dev->wwn;
2619     /* The SMART values should be preserved across power cycles
2620        but they aren't.  */
2621     s->smart_enabled = 1;
2622     s->smart_autosave = 1;
2623     s->smart_errors = 0;
2624     s->smart_selftest_count = 0;
2625     if (kind == IDE_CD) {
2626         blk_set_dev_ops(s->blk, &ide_cd_block_ops, s);
2627     } else {
2628         if (!blk_is_inserted(s->blk)) {
2629             error_setg(errp, "Device needs media, but drive is empty");
2630             return -1;
2631         }
2632         if (!blk_is_writable(s->blk)) {
2633             error_setg(errp, "Can't use a read-only drive");
2634             return -1;
2635         }
2636         blk_set_dev_ops(s->blk, &ide_hd_block_ops, s);
2637     }
2638     if (dev->serial) {
2639         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), dev->serial);
2640     } else {
2641         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2642                  "QM%05d", s->drive_serial);
2643     }
2644     if (dev->model) {
2645         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), dev->model);
2646     } else {
2647         switch (kind) {
2648         case IDE_CD:
2649             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2650             break;
2651         case IDE_CFATA:
2652             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2653             break;
2654         default:
2655             strcpy(s->drive_model_str, "QEMU HARDDISK");
2656             break;
2657         }
2658     }
2659 
2660     if (dev->version) {
2661         pstrcpy(s->version, sizeof(s->version), dev->version);
2662     } else {
2663         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2664     }
2665 
2666     ide_reset(s);
2667     blk_iostatus_enable(s->blk);
2668     return 0;
2669 }
2670 
2671 static void ide_init1(IDEBus *bus, int unit)
2672 {
2673     static int drive_serial = 1;
2674     IDEState *s = &bus->ifs[unit];
2675 
2676     s->bus = bus;
2677     s->unit = unit;
2678     s->drive_serial = drive_serial++;
2679     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2680     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2681     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2682     memset(s->io_buffer, 0, s->io_buffer_total_len);
2683 
2684     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2685     memset(s->smart_selftest_data, 0, 512);
2686 
2687     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2688                                            ide_sector_write_timer_cb, s);
2689 }
2690 
2691 static int ide_nop_int(const IDEDMA *dma, bool is_write)
2692 {
2693     return 0;
2694 }
2695 
2696 static void ide_nop(const IDEDMA *dma)
2697 {
2698 }
2699 
2700 static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
2701 {
2702     return 0;
2703 }
2704 
2705 static const IDEDMAOps ide_dma_nop_ops = {
2706     .prepare_buf    = ide_nop_int32,
2707     .restart_dma    = ide_nop,
2708     .rw_buf         = ide_nop_int,
2709 };
2710 
2711 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2712 {
2713     s->unit = s->bus->retry_unit;
2714     ide_set_sector(s, s->bus->retry_sector_num);
2715     s->nsector = s->bus->retry_nsector;
2716     s->bus->dma->ops->restart_dma(s->bus->dma);
2717     s->io_buffer_size = 0;
2718     s->dma_cmd = dma_cmd;
2719     ide_start_dma(s, ide_dma_cb);
2720 }
2721 
2722 static void ide_restart_bh(void *opaque)
2723 {
2724     IDEBus *bus = opaque;
2725     IDEState *s;
2726     bool is_read;
2727     int error_status;
2728 
2729     qemu_bh_delete(bus->bh);
2730     bus->bh = NULL;
2731 
2732     error_status = bus->error_status;
2733     if (bus->error_status == 0) {
2734         return;
2735     }
2736 
2737     s = ide_bus_active_if(bus);
2738     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2739 
2740     /* The error status must be cleared before resubmitting the request: The
2741      * request may fail again, and this case can only be distinguished if the
2742      * called function can set a new error status. */
2743     bus->error_status = 0;
2744 
2745     /* The HBA has generically asked to be kicked on retry */
2746     if (error_status & IDE_RETRY_HBA) {
2747         if (s->bus->dma->ops->restart) {
2748             s->bus->dma->ops->restart(s->bus->dma);
2749         }
2750     } else if (IS_IDE_RETRY_DMA(error_status)) {
2751         if (error_status & IDE_RETRY_TRIM) {
2752             ide_restart_dma(s, IDE_DMA_TRIM);
2753         } else {
2754             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2755         }
2756     } else if (IS_IDE_RETRY_PIO(error_status)) {
2757         if (is_read) {
2758             ide_sector_read(s);
2759         } else {
2760             ide_sector_write(s);
2761         }
2762     } else if (error_status & IDE_RETRY_FLUSH) {
2763         ide_flush_cache(s);
2764     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2765         assert(s->end_transfer_func == ide_atapi_cmd);
2766         ide_atapi_dma_restart(s);
2767     } else {
2768         abort();
2769     }
2770 }
2771 
2772 static void ide_restart_cb(void *opaque, bool running, RunState state)
2773 {
2774     IDEBus *bus = opaque;
2775 
2776     if (!running)
2777         return;
2778 
2779     if (!bus->bh) {
2780         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2781         qemu_bh_schedule(bus->bh);
2782     }
2783 }
2784 
2785 void ide_bus_register_restart_cb(IDEBus *bus)
2786 {
2787     if (bus->dma->ops->restart_dma) {
2788         bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2789     }
2790 }
2791 
2792 static IDEDMA ide_dma_nop = {
2793     .ops = &ide_dma_nop_ops,
2794     .aiocb = NULL,
2795 };
2796 
2797 void ide_bus_init_output_irq(IDEBus *bus, qemu_irq irq_out)
2798 {
2799     int i;
2800 
2801     for(i = 0; i < 2; i++) {
2802         ide_init1(bus, i);
2803         ide_reset(&bus->ifs[i]);
2804     }
2805     bus->irq = irq_out;
2806     bus->dma = &ide_dma_nop;
2807 }
2808 
2809 void ide_bus_set_irq(IDEBus *bus)
2810 {
2811     if (!(bus->cmd & IDE_CTRL_DISABLE_IRQ)) {
2812         qemu_irq_raise(bus->irq);
2813     }
2814 }
2815 
2816 void ide_exit(IDEState *s)
2817 {
2818     timer_free(s->sector_write_timer);
2819     qemu_vfree(s->smart_selftest_data);
2820     qemu_vfree(s->io_buffer);
2821 }
2822 
2823 static bool is_identify_set(void *opaque, int version_id)
2824 {
2825     IDEState *s = opaque;
2826 
2827     return s->identify_set != 0;
2828 }
2829 
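/*
 * end_transfer_func is a function pointer and cannot be migrated directly,
 * so ide_drive_pio_pre_save() stores its index in this table and
 * ide_drive_pio_post_load() restores the pointer from that index.
 */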
2830 static EndTransferFunc* transfer_end_table[] = {
2831         ide_sector_read,
2832         ide_sector_write,
2833         ide_transfer_stop,
2834         ide_atapi_cmd_reply_end,
2835         ide_atapi_cmd,
2836         ide_dummy_transfer_stop,
2837 };
2838 
2839 static int transfer_end_table_idx(EndTransferFunc *fn)
2840 {
2841     int i;
2842 
2843     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2844         if (transfer_end_table[i] == fn)
2845             return i;
2846 
2847     return -1;
2848 }
2849 
2850 static int ide_drive_post_load(void *opaque, int version_id)
2851 {
2852     IDEState *s = opaque;
2853 
2854     if (s->blk && s->identify_set) {
2855         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2856     }
2857     return 0;
2858 }
2859 
2860 static int ide_drive_pio_post_load(void *opaque, int version_id)
2861 {
2862     IDEState *s = opaque;
2863 
2864     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2865         return -EINVAL;
2866     }
2867     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2868     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2869     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2870     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2871 
2872     return 0;
2873 }
2874 
2875 static int ide_drive_pio_pre_save(void *opaque)
2876 {
2877     IDEState *s = opaque;
2878     int idx;
2879 
2880     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2881     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2882 
2883     idx = transfer_end_table_idx(s->end_transfer_func);
2884     if (idx == -1) {
2885         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2886                         __func__);
2887         s->end_transfer_fn_idx = 2;
2888     } else {
2889         s->end_transfer_fn_idx = idx;
2890     }
2891 
2892     return 0;
2893 }
2894 
2895 static bool ide_drive_pio_state_needed(void *opaque)
2896 {
2897     IDEState *s = opaque;
2898 
2899     return ((s->status & DRQ_STAT) != 0)
2900         || (s->bus->error_status & IDE_RETRY_PIO);
2901 }
2902 
2903 static bool ide_tray_state_needed(void *opaque)
2904 {
2905     IDEState *s = opaque;
2906 
2907     return s->tray_open || s->tray_locked;
2908 }
2909 
2910 static bool ide_atapi_gesn_needed(void *opaque)
2911 {
2912     IDEState *s = opaque;
2913 
2914     return s->events.new_media || s->events.eject_request;
2915 }
2916 
2917 static bool ide_error_needed(void *opaque)
2918 {
2919     IDEBus *bus = opaque;
2920 
2921     return (bus->error_status != 0);
2922 }
2923 
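/*
 * Each VMState subsection below is written only when its .needed callback
 * returns true, so an idle drive migrates with just the base ide_drive
 * fields.
 */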
2924 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2925 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2926     .name ="ide_drive/atapi/gesn_state",
2927     .version_id = 1,
2928     .minimum_version_id = 1,
2929     .needed = ide_atapi_gesn_needed,
2930     .fields = (const VMStateField[]) {
2931         VMSTATE_BOOL(events.new_media, IDEState),
2932         VMSTATE_BOOL(events.eject_request, IDEState),
2933         VMSTATE_END_OF_LIST()
2934     }
2935 };
2936 
2937 static const VMStateDescription vmstate_ide_tray_state = {
2938     .name = "ide_drive/tray_state",
2939     .version_id = 1,
2940     .minimum_version_id = 1,
2941     .needed = ide_tray_state_needed,
2942     .fields = (const VMStateField[]) {
2943         VMSTATE_BOOL(tray_open, IDEState),
2944         VMSTATE_BOOL(tray_locked, IDEState),
2945         VMSTATE_END_OF_LIST()
2946     }
2947 };
2948 
2949 static const VMStateDescription vmstate_ide_drive_pio_state = {
2950     .name = "ide_drive/pio_state",
2951     .version_id = 1,
2952     .minimum_version_id = 1,
2953     .pre_save = ide_drive_pio_pre_save,
2954     .post_load = ide_drive_pio_post_load,
2955     .needed = ide_drive_pio_state_needed,
2956     .fields = (const VMStateField[]) {
2957         VMSTATE_INT32(req_nb_sectors, IDEState),
2958         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2959                              vmstate_info_uint8, uint8_t),
2960         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2961         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2962         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2963         VMSTATE_INT32(elementary_transfer_size, IDEState),
2964         VMSTATE_INT32(packet_transfer_size, IDEState),
2965         VMSTATE_END_OF_LIST()
2966     }
2967 };
2968 
2969 const VMStateDescription vmstate_ide_drive = {
2970     .name = "ide_drive",
2971     .version_id = 3,
2972     .minimum_version_id = 0,
2973     .post_load = ide_drive_post_load,
2974     .fields = (const VMStateField[]) {
2975         VMSTATE_INT32(mult_sectors, IDEState),
2976         VMSTATE_INT32(identify_set, IDEState),
2977         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2978         VMSTATE_UINT8(feature, IDEState),
2979         VMSTATE_UINT8(error, IDEState),
2980         VMSTATE_UINT32(nsector, IDEState),
2981         VMSTATE_UINT8(sector, IDEState),
2982         VMSTATE_UINT8(lcyl, IDEState),
2983         VMSTATE_UINT8(hcyl, IDEState),
2984         VMSTATE_UINT8(hob_feature, IDEState),
2985         VMSTATE_UINT8(hob_sector, IDEState),
2986         VMSTATE_UINT8(hob_nsector, IDEState),
2987         VMSTATE_UINT8(hob_lcyl, IDEState),
2988         VMSTATE_UINT8(hob_hcyl, IDEState),
2989         VMSTATE_UINT8(select, IDEState),
2990         VMSTATE_UINT8(status, IDEState),
2991         VMSTATE_UINT8(lba48, IDEState),
2992         VMSTATE_UINT8(sense_key, IDEState),
2993         VMSTATE_UINT8(asc, IDEState),
2994         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2995         VMSTATE_END_OF_LIST()
2996     },
2997     .subsections = (const VMStateDescription * const []) {
2998         &vmstate_ide_drive_pio_state,
2999         &vmstate_ide_tray_state,
3000         &vmstate_ide_atapi_gesn_state,
3001         NULL
3002     }
3003 };
3004 
3005 static const VMStateDescription vmstate_ide_error_status = {
3006     .name ="ide_bus/error",
3007     .version_id = 2,
3008     .minimum_version_id = 1,
3009     .needed = ide_error_needed,
3010     .fields = (const VMStateField[]) {
3011         VMSTATE_INT32(error_status, IDEBus),
3012         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
3013         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
3014         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
3015         VMSTATE_END_OF_LIST()
3016     }
3017 };
3018 
3019 const VMStateDescription vmstate_ide_bus = {
3020     .name = "ide_bus",
3021     .version_id = 1,
3022     .minimum_version_id = 1,
3023     .fields = (const VMStateField[]) {
3024         VMSTATE_UINT8(cmd, IDEBus),
3025         VMSTATE_UINT8(unit, IDEBus),
3026         VMSTATE_END_OF_LIST()
3027     },
3028     .subsections = (const VMStateDescription * const []) {
3029         &vmstate_ide_error_status,
3030         NULL
3031     }
3032 };
3033 
3034 void ide_drive_get(DriveInfo **hd, int n)
3035 {
3036     int i;
3037 
3038     for (i = 0; i < n; i++) {
3039         hd[i] = drive_get_by_index(IF_IDE, i);
3040     }
3041 }
3042