xref: /qemu/hw/scsi/esp.c (revision a034765161b22fdd12d51ec7a30e65750e821107)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On Macintosh Quadra it is a NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
/* Assert the DRQ (data request) line used for PDMA transfers */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}
68 
/* Deassert the DRQ (data request) line used for PDMA transfers */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}
74 
75 void esp_dma_enable(ESPState *s, int irq, int level)
76 {
77     if (level) {
78         s->dma_enabled = 1;
79         trace_esp_dma_enable();
80         if (s->dma_cb) {
81             s->dma_cb(s);
82             s->dma_cb = NULL;
83         }
84     } else {
85         trace_esp_dma_disable();
86         s->dma_enabled = 0;
87     }
88 }
89 
/*
 * SCSI layer callback: a request was cancelled. If it is the request
 * currently in flight, drop our reference and clear all transfer state
 * so no further data is moved for it.
 */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}
101 
102 static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
103 {
104     if (fifo8_num_used(fifo) == fifo->capacity) {
105         trace_esp_error_fifo_overrun();
106         return;
107     }
108 
109     fifo8_push(fifo, val);
110 }
111 
112 static uint8_t esp_fifo_pop(Fifo8 *fifo)
113 {
114     if (fifo8_is_empty(fifo)) {
115         return 0;
116     }
117 
118     return fifo8_pop(fifo);
119 }
120 
/*
 * Pop up to maxlen bytes from a FIFO into dest (dest may be NULL to
 * simply discard the bytes). Returns the number of bytes actually
 * popped, which may be less than maxlen if the FIFO holds fewer bytes.
 *
 * fifo8_pop_buf() only returns a pointer to a contiguous chunk of the
 * underlying ring buffer, so when the live data wraps around the end
 * of the buffer a second pop is needed to fetch the remainder.
 */
static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}
150 
151 static uint32_t esp_get_tc(ESPState *s)
152 {
153     uint32_t dmalen;
154 
155     dmalen = s->rregs[ESP_TCLO];
156     dmalen |= s->rregs[ESP_TCMID] << 8;
157     dmalen |= s->rregs[ESP_TCHI] << 16;
158 
159     return dmalen;
160 }
161 
162 static void esp_set_tc(ESPState *s, uint32_t dmalen)
163 {
164     uint32_t old_tc = esp_get_tc(s);
165 
166     s->rregs[ESP_TCLO] = dmalen;
167     s->rregs[ESP_TCMID] = dmalen >> 8;
168     s->rregs[ESP_TCHI] = dmalen >> 16;
169 
170     if (old_tc && dmalen == 0) {
171         s->rregs[ESP_RSTAT] |= STAT_TC;
172     }
173 }
174 
175 static uint32_t esp_get_stc(ESPState *s)
176 {
177     uint32_t dmalen;
178 
179     dmalen = s->wregs[ESP_TCLO];
180     dmalen |= s->wregs[ESP_TCMID] << 8;
181     dmalen |= s->wregs[ESP_TCHI] << 16;
182 
183     return dmalen;
184 }
185 
186 static uint8_t esp_pdma_read(ESPState *s)
187 {
188     uint8_t val;
189 
190     val = esp_fifo_pop(&s->fifo);
191     return val;
192 }
193 
194 static void esp_pdma_write(ESPState *s, uint8_t val)
195 {
196     uint32_t dmalen = esp_get_tc(s);
197 
198     if (dmalen == 0) {
199         return;
200     }
201 
202     esp_fifo_push(&s->fifo, val);
203 
204     dmalen--;
205     esp_set_tc(s, dmalen);
206 }
207 
/*
 * Record which PDMA continuation to run next (dispatched later by
 * esp_pdma_cb()); stored as an enum so it can be migrated
 */
static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
{
    s->pdma_cb = cb;
}
212 
/*
 * Begin target selection for the id programmed into the bus ID
 * register. Returns 0 on success, -1 if no device exists at that
 * target (in which case a disconnect interrupt is raised).
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
245 
/*
 * Gather up to maxlen command bytes into cmdfifo, either via real DMA
 * (dma_memory_read) or from the data FIFO for non-DMA transfers.
 * Returns the number of bytes consumed from the source, or 0 if no
 * bytes were available (e.g. PDMA where data arrives later).
 *
 * NOTE(review): in both paths the returned length is the amount read
 * from the source, which may exceed what actually fit into cmdfifo if
 * cmdfifo was nearly full — presumably cmdfifo always has room at this
 * point; confirm against callers.
 */
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
            esp_set_tc(s, esp_get_tc(s) - dmalen);
        } else {
            /* No DMA backend: bytes will arrive via PDMA instead */
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    return dmalen;
}
279 
/*
 * Execute the CDB accumulated in cmdfifo: look up the LUN, enqueue the
 * SCSI request and switch the bus to the appropriate data phase.
 * For DATA IN the completion interrupt is deferred until the first
 * data transfer completes (see esp_transfer_data()); for DATA OUT the
 * interrupt is raised immediately.
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    /* datalen > 0: device-to-host; datalen < 0: host-to-device; 0: none */
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
330 
/*
 * Consume the MESSAGE OUT bytes that precede the CDB in cmdfifo.
 * The first message byte is an IDENTIFY whose low 3 bits select the
 * LUN; any remaining message bytes (extended messages) are discarded.
 * Afterwards cmdfifo_cdb_offset is 0 and cmdfifo starts at the CDB.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
348 
/* Process message phase then command phase for the bytes in cmdfifo */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    /* do_message_phase() must have consumed the whole message */
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
355 
/*
 * PDMA continuation for SELECT WITH ATN: drain newly arrived FIFO
 * bytes into cmdfifo and, once the transfer counter is exhausted and
 * at least one byte (the IDENTIFY message) is present, run the command.
 */
static void satn_pdma_cb(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    int n;

    /* Copy FIFO into cmdfifo */
    n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
    n = MIN(fifo8_num_free(&s->cmdfifo), n);
    fifo8_push_all(&s->cmdfifo, buf, n);

    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        /* One IDENTIFY message byte precedes the CDB */
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    }
}
372 
/*
 * SELECT WITH ATN command: select the target and transfer one
 * IDENTIFY message byte followed by the CDB. If DMA is requested but
 * currently disabled, defer the whole handler until DMA is enabled.
 */
static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    esp_set_pdma_cb(s, SATN_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* Full message + CDB already available: execute now */
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            /* Command bytes will arrive via PDMA */
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
400 
/*
 * PDMA continuation for SELECT WITHOUT ATN: like satn_pdma_cb() but
 * with no message byte before the CDB (cmdfifo_cdb_offset = 0).
 */
static void s_without_satn_pdma_cb(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    int n;

    /* Copy FIFO into cmdfifo */
    n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
    n = MIN(fifo8_num_free(&s->cmdfifo), n);
    fifo8_push_all(&s->cmdfifo, buf, n);

    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}
417 
/*
 * SELECT WITHOUT ATN command: select the target and transfer the CDB
 * directly (no preceding message bytes). Deferred if DMA is requested
 * but not yet enabled.
 */
static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* CDB already available: execute now */
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            /* Command bytes will arrive via PDMA */
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
445 
/*
 * PDMA continuation for SELECT WITH ATN AND STOP: collect the message
 * byte(s) into cmdfifo, then stop after the message phase and raise an
 * interrupt so the guest can issue the CDB transfer separately.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    int n;

    /* Copy FIFO into cmdfifo */
    n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
    n = MIN(fifo8_num_free(&s->cmdfifo), n);
    fifo8_push_all(&s->cmdfifo, buf, n);

    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
466 
/*
 * SELECT WITH ATN AND STOP command: select the target, transfer only
 * the first message byte, then stop in message-out phase so the guest
 * can send further message bytes and the CDB with a later TI command.
 * Deferred if DMA is requested but not yet enabled.
 */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    /* Only the single IDENTIFY message byte is fetched here */
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            /* Message byte will arrive via PDMA */
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}
498 
/*
 * PDMA continuation for ICCS: the guest has drained the status +
 * message bytes from the FIFO, so report completion of the status
 * phase and interrupt.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
506 
/*
 * ICCS (Initiator Command Complete Sequence): deliver the two-byte
 * status + message-0 response to the guest, via real DMA, PDMA
 * (deferring the IRQ to write_response_pdma_cb), or the FIFO for
 * non-DMA transfers.
 */
static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;            /* COMMAND COMPLETE message byte */

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* PDMA: guest will read the bytes itself, IRQ comes later */
            esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
534 
/*
 * Signal completion of a DMA transfer: latch terminal count, raise a
 * bus-service interrupt, and zero the FIFO flags and transfer counter.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
543 
/*
 * PDMA continuation for TI (transfer information) commands. Three
 * cases, in order:
 *   1. do_cmd set: command/message bytes are still being collected —
 *      move them from the FIFO into cmdfifo and run the command once
 *      the transfer counter is exhausted.
 *   2. DATA OUT (to_device): move FIFO bytes into the SCSI layer's
 *      async buffer.
 *   3. DATA IN: refill the FIFO from the async buffer for the guest
 *      to drain.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Copy FIFO into cmdfifo */
        n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);

        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            /* SCSI buffer filled: hand it to the SCSI layer */
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        /* Copy device data to FIFO */
        len = MIN(s->async_len, esp_get_tc(s));
        len = MIN(len, fifo8_num_free(&s->fifo));
        fifo8_push_all(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size -= len;
        esp_set_tc(s, esp_get_tc(s) - len);
    }
}
642 
/*
 * Perform (part of) a DMA transfer for a TI command. Depending on
 * state this either collects command bytes into cmdfifo (do_cmd set),
 * or moves data between the SCSI layer's async buffer and guest
 * memory — via the dma_memory_read/write callbacks when a DMA backend
 * exists, or via the FIFO + DRQ (PDMA) otherwise.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            /* No DMA backend: command bytes arrive via PDMA instead */
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    /* Transfer no more than the SCSI layer's buffer holds */
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);

            esp_set_tc(s, esp_get_tc(s) - len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;

            if (s->async_len == 0) {
                scsi_req_continue(s->current_req);
                /*
                 * If there is still data to be read from the device then
                 * complete the DMA operation immediately.  Otherwise defer
                 * until the scsi layer has completed.
                 */
                return;
            }

            /* Partially filled a scsi buffer. Complete immediately.  */
            esp_dma_done(s);
            esp_lower_drq(s);
        } else {
            /* PDMA: guest pushes the data through the FIFO */
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);

            esp_set_tc(s, esp_get_tc(s) - len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            if (s->async_len == 0) {
                scsi_req_continue(s->current_req);
                /*
                 * If there is still data to be read from the device then
                 * complete the DMA operation immediately.  Otherwise defer
                 * until the scsi layer has completed.
                 */
                if (esp_get_tc(s) != 0 || s->ti_size == 0) {
                    return;
                }
            }

            /* Partially filled a scsi buffer. Complete immediately.  */
            esp_dma_done(s);
            esp_lower_drq(s);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
        }
    }
}
781 
/*
 * Perform (part of) a non-DMA TI transfer through the FIFO. With
 * do_cmd set the FIFO contents are command/message bytes destined for
 * cmdfifo; otherwise data moves between the FIFO and the SCSI layer's
 * async buffer one FIFO-load (DATA OUT) or one byte (DATA IN) at a
 * time, raising a bus-service interrupt after each step.
 */
static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len, n;

    if (s->do_cmd) {
        /* Copy FIFO into cmdfifo */
        n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);

        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }

    if (to_device) {
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        /* DATA IN: present one byte at a time through the FIFO */
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        /* SCSI buffer exhausted: hand control back to the SCSI layer */
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
854 
/*
 * Dispatch the PDMA continuation recorded in s->pdma_cb. An enum
 * indirection is used instead of a raw function pointer so the
 * pending callback can be stored in migratable state.
 */
static void esp_pdma_cb(ESPState *s)
{
    switch (s->pdma_cb) {
    case SATN_PDMA_CB:
        satn_pdma_cb(s);
        break;
    case S_WITHOUT_SATN_PDMA_CB:
        s_without_satn_pdma_cb(s);
        break;
    case SATN_STOP_PDMA_CB:
        satn_stop_pdma_cb(s);
        break;
    case WRITE_RESPONSE_PDMA_CB:
        write_response_pdma_cb(s);
        break;
    case DO_DMA_PDMA_CB:
        do_dma_pdma_cb(s);
        break;
    default:
        g_assert_not_reached();
    }
}
877 
/*
 * SCSI layer callback: the current request has finished. Record its
 * status, switch the bus to status phase once all data has been
 * transferred, and release the request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            /* Transfer ended with bytes still outstanding */
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] &= ~7;
        s->rregs[ESP_RSTAT] |= STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
919 
/*
 * SCSI layer callback: a buffer of len bytes is ready for transfer.
 * Latches the buffer, raises the deferred command-completion interrupt
 * for the first DATA IN buffer, and resumes any TI command already in
 * progress (DMA or non-DMA according to the recorded ti_cmd).
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}
968 
/*
 * TI (transfer information) command: start or continue the data phase
 * transfer. Records the raw command byte in ti_cmd so that a later
 * esp_transfer_data() can resume with the correct DMA/non-DMA mode.
 * Deferred if DMA is requested but not yet enabled.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        /* Clear any stale terminal-count status before the transfer */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}
989 
990 void esp_hard_reset(ESPState *s)
991 {
992     memset(s->rregs, 0, ESP_REGS);
993     memset(s->wregs, 0, ESP_REGS);
994     s->tchi_written = 0;
995     s->ti_size = 0;
996     s->async_len = 0;
997     fifo8_reset(&s->fifo);
998     fifo8_reset(&s->cmdfifo);
999     s->dma = 0;
1000     s->do_cmd = 0;
1001     s->dma_cb = NULL;
1002 
1003     s->rregs[ESP_CFG1] = 7;
1004 }
1005 
/* Soft reset: drop both IRQ lines, then reset chip state */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
1012 
/* Reset every device on the attached SCSI bus */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
1017 
1018 static void parent_esp_reset(ESPState *s, int irq, int level)
1019 {
1020     if (level) {
1021         esp_soft_reset(s);
1022     }
1023 }
1024 
/*
 * Execute the command written to the ESP command register. The DMA
 * bit selects DMA mode and reloads the live transfer counter from the
 * guest-programmed start count (a value of 0 means the maximum of
 * 0x10000, per the chip's 16-bit-counter convention).
 */
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        /* Discard any bytes still in the data FIFO */
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        /* CFG1_RESREPT suppresses the reset-detected interrupt */
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        /* Initiator command complete: send status + message bytes */
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        s->rregs[ESP_RINTR] |= INTR_FC;
        s->rregs[ESP_RSTAT] |= STAT_MI;
        break;
    case CMD_MSGACC:
        /* Message accepted: finish the command with a disconnect */
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_FC;
        s->rregs[ESP_RSEQ] = 0;
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
1115 
1116 uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
1117 {
1118     uint32_t val;
1119 
1120     switch (saddr) {
1121     case ESP_FIFO:
1122         if (s->dma_memory_read && s->dma_memory_write &&
1123                 (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
1124             /* Data out.  */
1125             qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
1126             s->rregs[ESP_FIFO] = 0;
1127         } else {
1128             if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
1129                 if (s->ti_size) {
1130                     esp_do_nodma(s);
1131                 } else {
1132                     /*
1133                      * The last byte of a non-DMA transfer has been read out
1134                      * of the FIFO so switch to status phase
1135                      */
1136                     s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
1137                 }
1138             }
1139             s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
1140         }
1141         val = s->rregs[ESP_FIFO];
1142         break;
1143     case ESP_RINTR:
1144         /*
1145          * Clear sequence step, interrupt register and all status bits
1146          * except TC
1147          */
1148         val = s->rregs[ESP_RINTR];
1149         s->rregs[ESP_RINTR] = 0;
1150         s->rregs[ESP_RSTAT] &= ~STAT_TC;
1151         /*
1152          * According to the datasheet ESP_RSEQ should be cleared, but as the
1153          * emulation currently defers information transfers to the next TI
1154          * command leave it for now so that pedantic guests such as the old
1155          * Linux 2.6 driver see the correct flags before the next SCSI phase
1156          * transition.
1157          *
1158          * s->rregs[ESP_RSEQ] = SEQ_0;
1159          */
1160         esp_lower_irq(s);
1161         break;
1162     case ESP_TCHI:
1163         /* Return the unique id if the value has never been written */
1164         if (!s->tchi_written) {
1165             val = s->chip_id;
1166         } else {
1167             val = s->rregs[saddr];
1168         }
1169         break;
1170      case ESP_RFLAGS:
1171         /* Bottom 5 bits indicate number of bytes in FIFO */
1172         val = fifo8_num_used(&s->fifo);
1173         break;
1174     default:
1175         val = s->rregs[saddr];
1176         break;
1177     }
1178 
1179     trace_esp_mem_readb(saddr, val);
1180     return val;
1181 }
1182 
/*
 * Write one ESP register. On success the value is mirrored into wregs;
 * invalid register numbers are traced and the write is discarded.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing any transfer count register clears the TC status bit */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Command phase: route the byte through the FIFO into cmdfifo */
            if (!fifo8_is_full(&s->fifo)) {
                esp_fifo_push(&s->fifo, val);
                esp_fifo_push(&s->cmdfifo, fifo8_pop(&s->fifo));
            }

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Configuration registers are directly readable back */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        /* Early return: do not latch the value into wregs */
        return;
    }
    s->wregs[saddr] = val;
}
1232 
1233 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1234                             unsigned size, bool is_write,
1235                             MemTxAttrs attrs)
1236 {
1237     return (size == 1) || (is_write && size == 4);
1238 }
1239 
1240 static bool esp_is_before_version_5(void *opaque, int version_id)
1241 {
1242     ESPState *s = ESP(opaque);
1243 
1244     version_id = MIN(version_id, s->mig_version_id);
1245     return version_id < 5;
1246 }
1247 
1248 static bool esp_is_version_5(void *opaque, int version_id)
1249 {
1250     ESPState *s = ESP(opaque);
1251 
1252     version_id = MIN(version_id, s->mig_version_id);
1253     return version_id >= 5;
1254 }
1255 
1256 static bool esp_is_version_6(void *opaque, int version_id)
1257 {
1258     ESPState *s = ESP(opaque);
1259 
1260     version_id = MIN(version_id, s->mig_version_id);
1261     return version_id >= 6;
1262 }
1263 
1264 int esp_pre_save(void *opaque)
1265 {
1266     ESPState *s = ESP(object_resolve_path_component(
1267                       OBJECT(opaque), "esp"));
1268 
1269     s->mig_version_id = vmstate_esp.version_id;
1270     return 0;
1271 }
1272 
1273 static int esp_post_load(void *opaque, int version_id)
1274 {
1275     ESPState *s = ESP(opaque);
1276     int len, i;
1277 
1278     version_id = MIN(version_id, s->mig_version_id);
1279 
1280     if (version_id < 5) {
1281         esp_set_tc(s, s->mig_dma_left);
1282 
1283         /* Migrate ti_buf to fifo */
1284         len = s->mig_ti_wptr - s->mig_ti_rptr;
1285         for (i = 0; i < len; i++) {
1286             fifo8_push(&s->fifo, s->mig_ti_buf[i]);
1287         }
1288 
1289         /* Migrate cmdbuf to cmdfifo */
1290         for (i = 0; i < s->mig_cmdlen; i++) {
1291             fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
1292         }
1293     }
1294 
1295     s->mig_version_id = vmstate_esp.version_id;
1296     return 0;
1297 }
1298 
1299 /*
1300  * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
1301  * guest CPU to perform the transfers between the SCSI bus and memory
1302  * itself. This is indicated by the dma_memory_read and dma_memory_write
1303  * functions being NULL (in contrast to the ESP PCI device) whilst
1304  * dma_enabled is still set.
1305  */
1306 
1307 static bool esp_pdma_needed(void *opaque)
1308 {
1309     ESPState *s = ESP(opaque);
1310 
1311     return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
1312            s->dma_enabled;
1313 }
1314 
/* Migration subsection: only present when PDMA is in use (see above) */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1325 
/*
 * Main ESP migration description. Fields guarded by esp_is_before_version_5
 * exist only to load legacy streams; esp_post_load converts them into the
 * current FIFO-based representation.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /*
         * cmdbuf is migrated in two pieces: the first 16 bytes from the
         * initial format, and (since stream version 4) the remainder of
         * the enlarged buffer starting at offset 16.
         */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_esp_pdma,
        NULL
    }
};
1365 
1366 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1367                                  uint64_t val, unsigned int size)
1368 {
1369     SysBusESPState *sysbus = opaque;
1370     ESPState *s = ESP(&sysbus->esp);
1371     uint32_t saddr;
1372 
1373     saddr = addr >> sysbus->it_shift;
1374     esp_reg_write(s, saddr, val);
1375 }
1376 
1377 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1378                                     unsigned int size)
1379 {
1380     SysBusESPState *sysbus = opaque;
1381     ESPState *s = ESP(&sysbus->esp);
1382     uint32_t saddr;
1383 
1384     saddr = addr >> sysbus->it_shift;
1385     return esp_reg_read(s, saddr);
1386 }
1387 
/* MMIO ops for the register window; access sizes gated by esp_mem_accepts */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1394 
/*
 * PDMA write: the guest CPU pushes transfer bytes directly. A 16-bit
 * access delivers the high byte first, then the low byte; the PDMA
 * callback is run after every access.
 */
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        /* MSB first, then LSB */
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_pdma_cb(s);
}
1414 
/*
 * PDMA read: the guest CPU pulls transfer bytes directly. A 16-bit
 * access returns the first popped byte in the high half. The PDMA
 * callback only runs once the FIFO is nearly drained (fewer than 2
 * bytes left), so a subsequent 16-bit read cannot underrun.
 */
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        /* First byte lands in the MSB, second in the LSB */
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        esp_pdma_cb(s);
    }
    return val;
}
1438 
1439 static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
1440 {
1441     ESPState *s = container_of(req->bus, ESPState, bus);
1442 
1443     scsi_req_ref(req);
1444     s->current_req = req;
1445     return s;
1446 }
1447 
/*
 * MMIO ops for the PDMA window. Guest accesses of up to 4 bytes are
 * accepted but the implementation handles at most 2 at a time, so the
 * memory core splits wider accesses.
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1457 
/* SCSI bus callbacks wired up in sysbus_esp_realize() */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1468 
1469 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1470 {
1471     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1472     ESPState *s = ESP(&sysbus->esp);
1473 
1474     switch (irq) {
1475     case 0:
1476         parent_esp_reset(s, irq, level);
1477         break;
1478     case 1:
1479         esp_dma_enable(s, irq, level);
1480         break;
1481     }
1482 }
1483 
/*
 * Realize the sysbus wrapper: realize the inner ESP core, then expose
 * its IRQs, the register MMIO window (scaled by it_shift) and the PDMA
 * window, and finally create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    /* Realize the embedded ESP core first; abort on failure */
    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* Board code must have set the register spacing before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    /* Two GPIO inputs: 0 = reset, 1 = DMA enable (see gpio demux) */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1510 
1511 static void sysbus_esp_hard_reset(DeviceState *dev)
1512 {
1513     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1514     ESPState *s = ESP(&sysbus->esp);
1515 
1516     esp_hard_reset(s);
1517 }
1518 
/* Instance init: embed the ESP core as a QOM child named "esp" */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
1525 
/*
 * Migration description for the sysbus wrapper: saves the negotiated ESP
 * stream version (since v2) followed by the embedded ESP core state.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1537 
1538 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
1539 {
1540     DeviceClass *dc = DEVICE_CLASS(klass);
1541 
1542     dc->realize = sysbus_esp_realize;
1543     dc->reset = sysbus_esp_hard_reset;
1544     dc->vmsd = &vmstate_sysbus_esp_scsi;
1545     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1546 }
1547 
/* QOM type registration for the sysbus-attached ESP wrapper */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1555 
1556 static void esp_finalize(Object *obj)
1557 {
1558     ESPState *s = ESP(obj);
1559 
1560     fifo8_destroy(&s->fifo);
1561     fifo8_destroy(&s->cmdfifo);
1562 }
1563 
1564 static void esp_init(Object *obj)
1565 {
1566     ESPState *s = ESP(obj);
1567 
1568     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1569     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1570 }
1571 
1572 static void esp_class_init(ObjectClass *klass, void *data)
1573 {
1574     DeviceClass *dc = DEVICE_CLASS(klass);
1575 
1576     /* internal device for sysbusesp/pciespscsi, not user-creatable */
1577     dc->user_creatable = false;
1578     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1579 }
1580 
/* QOM type registration for the bare ESP core device */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
1589 
1590 static void esp_register_types(void)
1591 {
1592     type_register_static(&sysbus_esp_info);
1593     type_register_static(&esp_info);
1594 }
1595 
1596 type_init(esp_register_types)
1597