xref: /qemu/hw/scsi/esp.c (revision 49c60d1617d4870a5e7c4f9c2dc24fb3759b9679)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On Macintosh Quadra it is a NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
/* Assert the DMA request line (used for PDMA transfers). */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}
68 
/* Deassert the DMA request line. */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}
74 
75 void esp_dma_enable(ESPState *s, int irq, int level)
76 {
77     if (level) {
78         s->dma_enabled = 1;
79         trace_esp_dma_enable();
80         if (s->dma_cb) {
81             s->dma_cb(s);
82             s->dma_cb = NULL;
83         }
84     } else {
85         trace_esp_dma_disable();
86         s->dma_enabled = 0;
87     }
88 }
89 
90 void esp_request_cancelled(SCSIRequest *req)
91 {
92     ESPState *s = req->hba_private;
93 
94     if (req == s->current_req) {
95         scsi_req_unref(s->current_req);
96         s->current_req = NULL;
97         s->current_dev = NULL;
98         s->async_len = 0;
99     }
100 }
101 
102 static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
103 {
104     if (fifo8_num_used(fifo) == fifo->capacity) {
105         trace_esp_error_fifo_overrun();
106         return;
107     }
108 
109     fifo8_push(fifo, val);
110 }
111 
112 static uint8_t esp_fifo_pop(Fifo8 *fifo)
113 {
114     if (fifo8_is_empty(fifo)) {
115         return 0;
116     }
117 
118     return fifo8_pop(fifo);
119 }
120 
/*
 * Pop up to maxlen bytes from the FIFO into dest (dest may be NULL to
 * simply discard the bytes).  fifo8_pop_buf() returns a pointer into
 * the underlying ring buffer and may deliver fewer bytes than asked
 * for when the data wraps around the end of the ring, so a second pop
 * is performed to collect the remainder.  Returns the number of bytes
 * actually popped.
 */
static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}
150 
151 static uint32_t esp_get_tc(ESPState *s)
152 {
153     uint32_t dmalen;
154 
155     dmalen = s->rregs[ESP_TCLO];
156     dmalen |= s->rregs[ESP_TCMID] << 8;
157     dmalen |= s->rregs[ESP_TCHI] << 16;
158 
159     return dmalen;
160 }
161 
162 static void esp_set_tc(ESPState *s, uint32_t dmalen)
163 {
164     s->rregs[ESP_TCLO] = dmalen;
165     s->rregs[ESP_TCMID] = dmalen >> 8;
166     s->rregs[ESP_TCHI] = dmalen >> 16;
167 }
168 
169 static uint32_t esp_get_stc(ESPState *s)
170 {
171     uint32_t dmalen;
172 
173     dmalen = s->wregs[ESP_TCLO];
174     dmalen |= s->wregs[ESP_TCMID] << 8;
175     dmalen |= s->wregs[ESP_TCHI] << 16;
176 
177     return dmalen;
178 }
179 
180 static uint8_t esp_pdma_read(ESPState *s)
181 {
182     uint8_t val;
183 
184     if (s->do_cmd) {
185         val = esp_fifo_pop(&s->cmdfifo);
186     } else {
187         val = esp_fifo_pop(&s->fifo);
188     }
189 
190     return val;
191 }
192 
193 static void esp_pdma_write(ESPState *s, uint8_t val)
194 {
195     uint32_t dmalen = esp_get_tc(s);
196 
197     if (dmalen == 0) {
198         return;
199     }
200 
201     if (s->do_cmd) {
202         esp_fifo_push(&s->cmdfifo, val);
203     } else {
204         esp_fifo_push(&s->fifo, val);
205     }
206 
207     dmalen--;
208     esp_set_tc(s, dmalen);
209 }
210 
/*
 * Record which continuation esp_pdma_cb() should dispatch to once the
 * host has moved data over the PDMA interface (stored as an enum so it
 * can be migrated, rather than as a function pointer).
 */
static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
{
    s->pdma_cb = cb;
}
215 
/*
 * Begin selection of the target addressed by the WBUSID register:
 * reset transfer state, cancel any still-outstanding request and look
 * the device up on the bus.  Returns 0 on success; returns -1 and
 * raises a disconnect interrupt if no device exists at that ID.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
249 
/*
 * Gather up to maxlen CDB/message bytes into cmdfifo, either from DMA
 * memory or from the data FIFO, then start selection of the target.
 * Returns the number of bytes collected, 0 if nothing was available
 * yet (including the PDMA case, where the host must first push the
 * bytes and the registered pdma_cb finishes the job), or -1 if
 * selection failed.
 */
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            /* Real DMA: pull the bytes from memory straight into cmdfifo */
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            /* PDMA: select now and signal the host to transfer the bytes */
            if (esp_select(s) < 0) {
                return -1;
            }
            esp_raise_drq(s);
            return 0;
        }
    } else {
        /* Non-DMA: the guest has already written the bytes into the FIFO */
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        return -1;
    }
    return dmalen;
}
289 
/*
 * COMMAND phase: take the CDB accumulated in cmdfifo, create a SCSI
 * request for the currently selected device/LUN and enqueue it.  On a
 * data transfer the status register switches to DATA IN or DATA OUT;
 * for DATA IN the completion interrupt is deferred until the first
 * data is actually available (see esp_transfer_data()).
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    /* datalen > 0: device -> initiator; datalen < 0: initiator -> device */
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
340 
/*
 * MESSAGE OUT phase: consume the IDENTIFY byte at the head of cmdfifo
 * (its low 3 bits select the LUN), then discard any further message
 * bytes before the CDB.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
358 
/*
 * Process the contents of cmdfifo: strip the message bytes first so
 * that only the CDB remains, then execute the COMMAND phase.
 */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
365 
/*
 * PDMA continuation for "Select with ATN": once the host has pushed
 * the whole transfer (TC reached zero) and something arrived in
 * cmdfifo, run the command with a 1-byte IDENTIFY message prefix.
 */
static void satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    }
}
374 
/*
 * "Select with ATN" command: expects a 1-byte IDENTIFY message
 * followed by the CDB.  If DMA is requested but currently disabled,
 * defer the whole command until esp_dma_enable() is called.
 */
static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    esp_set_pdma_cb(s, SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
    /* cmdlen < 0: selection failed, interrupt already raised */
}
396 
/*
 * PDMA continuation for "Select without ATN": once the transfer count
 * is exhausted and cmdfifo holds data, run the command.  No message
 * bytes precede the CDB in this variant.
 */
static void s_without_satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}
405 
/*
 * "Select without ATN" command: the CDB arrives with no preceding
 * IDENTIFY message.  If DMA is requested but disabled, defer until
 * esp_dma_enable().
 */
static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
    /* cmdlen < 0: selection failed, interrupt already raised */
}
427 
/*
 * PDMA continuation for "Select with ATN and stop": when the message
 * byte has arrived, stop before the COMMAND phase and interrupt so the
 * guest can issue the CDB itself (typically via DMA/TI).
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
440 
/*
 * "Select with ATN and stop" command: fetch only the single IDENTIFY
 * message byte, then stop in MESSAGE OUT phase and interrupt; the CDB
 * is delivered by the guest afterwards.  Deferred if DMA is requested
 * but disabled.
 */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
    /* cmdlen < 0: selection failed, interrupt already raised */
}
466 
/*
 * PDMA continuation for write_response(): the status/message pair has
 * been transferred, so report transfer complete and move to STATUS
 * phase.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
474 
/*
 * ICCS (Initiator Command Complete Sequence): deliver the 2-byte
 * status + message (0 = COMMAND COMPLETE) pair to the initiator, via
 * DMA memory write, PDMA, or the FIFO for non-DMA operation.
 */
static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* PDMA: host pulls the bytes; IRQ raised in the callback */
            esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        /* Non-DMA: guest reads status and message from the FIFO */
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
502 
/*
 * Signal completion of a DMA transfer: set terminal count, clear the
 * residual counter and FIFO flags, and interrupt the guest.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
511 
/*
 * PDMA continuation for TI (Transfer Information) transfers: called
 * after the host has moved bytes across the PDMA interface.  Handles
 * three cases: command/message bytes still being accumulated
 * (s->do_cmd), data flowing to the device (DATA OUT), and data flowing
 * from the device (DATA IN).
 */
static void do_dma_pdma_cb(ESPState *s)
{
    /* Phase bits of RSTAT distinguish DATA OUT from DATA IN */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            /* SCSI buffer consumed; ask the SCSI layer for more */
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                 s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately.  */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
611 
/*
 * Perform a DMA TI (Transfer Information) step.  Depending on state
 * this either accumulates command/message bytes into cmdfifo
 * (s->do_cmd), or moves data between the SCSI layer's async buffer and
 * guest memory.  When no real dma_memory_read/write helpers are
 * registered, the transfer falls back to PDMA: DRQ is raised and
 * do_dma_pdma_cb() finishes the work once the host has moved the
 * bytes.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    /* Clamp the transfer to the SCSI layer's current buffer */
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    /* Account for the bytes just moved */
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
    esp_lower_drq(s);
}
737 
/*
 * Perform a non-DMA (programmed I/O) TI step: command bytes are taken
 * from cmdfifo, DATA OUT drains the FIFO into the SCSI buffer, and
 * DATA IN exposes the device data to the guest one FIFO byte at a
 * time (the guest reads it back via the FIFO register).
 */
static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }

    if (to_device) {
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        /* Expose only one byte at a time; guest polls it via ESP_FIFO */
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
804 
805 static void esp_pdma_cb(ESPState *s)
806 {
807     switch (s->pdma_cb) {
808     case SATN_PDMA_CB:
809         satn_pdma_cb(s);
810         break;
811     case S_WITHOUT_SATN_PDMA_CB:
812         s_without_satn_pdma_cb(s);
813         break;
814     case SATN_STOP_PDMA_CB:
815         satn_stop_pdma_cb(s);
816         break;
817     case WRITE_RESPONSE_PDMA_CB:
818         write_response_pdma_cb(s);
819         break;
820     case DO_DMA_PDMA_CB:
821         do_dma_pdma_cb(s);
822         break;
823     default:
824         g_assert_not_reached();
825     }
826 }
827 
/*
 * SCSI layer callback: the request has finished with the given
 * residual byte count.  Latch the status byte, switch to STATUS phase
 * if the transfer has fully drained, and release the request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
868 
/*
 * SCSI layer callback: a new chunk of data (len bytes) is available in
 * the request's buffer.  For DATA IN the first callback also raises
 * the deferred command-completion interrupt.  The transfer itself only
 * resumes here if a TI command is already in flight (ti_cmd != 0);
 * otherwise it waits for the guest's next TI command.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}
917 
/*
 * TI (Transfer Information) command: remember the exact command byte
 * (used later by esp_transfer_data() to pick DMA vs non-DMA), then
 * start the transfer.  Deferred if DMA is requested but disabled.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}
938 
939 void esp_hard_reset(ESPState *s)
940 {
941     memset(s->rregs, 0, ESP_REGS);
942     memset(s->wregs, 0, ESP_REGS);
943     s->tchi_written = 0;
944     s->ti_size = 0;
945     s->async_len = 0;
946     fifo8_reset(&s->fifo);
947     fifo8_reset(&s->cmdfifo);
948     s->dma = 0;
949     s->do_cmd = 0;
950     s->dma_cb = NULL;
951 
952     s->rregs[ESP_CFG1] = 7;
953 }
954 
/*
 * Soft reset (RESET command): drop both output lines, then perform the
 * same state clearing as a hard reset.
 */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
961 
/* Propagate a SCSI bus reset to every device on the bus. */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
966 
967 static void parent_esp_reset(ESPState *s, int irq, int level)
968 {
969     if (level) {
970         esp_soft_reset(s);
971     }
972 }
973 
/*
 * Guest read of an ESP register.  Several registers have side effects
 * on read: the FIFO register pops a byte (and may advance a non-DMA
 * DATA IN transfer or switch to STATUS phase), RINTR clears the
 * interrupt state, and TCHI returns the chip ID until it has been
 * written.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
                (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    /* More data pending: refill the single-byte FIFO */
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
     case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
1040 
/*
 * Handle a guest write to ESP register index @saddr (value @val).
 * Command-register writes are decoded here; for all other accepted
 * registers the value is also latched into wregs[] at the end.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        /* Remember the guest wrote TCHI so reads stop returning chip_id */
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing any transfer counter byte clears the TC status bit */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Command bytes are being accumulated: route to cmdfifo */
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            if (esp_get_stc(s) == 0) {
                /* A start transfer count of zero means the maximum, 0x10000 */
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        /* Decode the command code proper (low bits, DMA bit masked off) */
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            esp_bus_reset(s);
            /* Only interrupt if "Reset Interrupt Disable" is not set */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            /* Transfer Information: start/continue an information transfer */
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            /* Initiator Command Complete Sequence: send status + message */
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            /* Message Accepted: signal disconnect to finish the command */
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            /* Set ATN: no-op in this emulation, just trace it */
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            /* Reset ATN: no-op in this emulation, just trace it */
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            /* Select with ATN and stop sequence */
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        /* Write-only registers: latched into wregs[] below, no side effect */
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Config registers read back the written value */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        /* Out-of-range register: drop the write without updating wregs[] */
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
1171 
1172 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1173                             unsigned size, bool is_write,
1174                             MemTxAttrs attrs)
1175 {
1176     return (size == 1) || (is_write && size == 4);
1177 }
1178 
1179 static bool esp_is_before_version_5(void *opaque, int version_id)
1180 {
1181     ESPState *s = ESP(opaque);
1182 
1183     version_id = MIN(version_id, s->mig_version_id);
1184     return version_id < 5;
1185 }
1186 
1187 static bool esp_is_version_5(void *opaque, int version_id)
1188 {
1189     ESPState *s = ESP(opaque);
1190 
1191     version_id = MIN(version_id, s->mig_version_id);
1192     return version_id >= 5;
1193 }
1194 
1195 static bool esp_is_version_6(void *opaque, int version_id)
1196 {
1197     ESPState *s = ESP(opaque);
1198 
1199     version_id = MIN(version_id, s->mig_version_id);
1200     return version_id >= 6;
1201 }
1202 
1203 int esp_pre_save(void *opaque)
1204 {
1205     ESPState *s = ESP(object_resolve_path_component(
1206                       OBJECT(opaque), "esp"));
1207 
1208     s->mig_version_id = vmstate_esp.version_id;
1209     return 0;
1210 }
1211 
/*
 * post_load hook: convert pre-version-5 migration state (flat buffers
 * with read/write pointers) into the FIFO representation used today.
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    /* Clamp to the version the source actually sent */
    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        /* Old streams carried the TC in a separate dma_left field */
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        /*
         * NOTE(review): only the byte count uses rptr/wptr; the bytes are
         * copied from index 0, not from mig_ti_rptr. This is only correct
         * if old-format senders always had rptr == 0 — TODO confirm.
         */
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1237 
1238 /*
1239  * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
1240  * guest CPU to perform the transfers between the SCSI bus and memory
1241  * itself. This is indicated by the dma_memory_read and dma_memory_write
1242  * functions being NULL (in contrast to the ESP PCI device) whilst
1243  * dma_enabled is still set.
1244  */
1245 
1246 static bool esp_pdma_needed(void *opaque)
1247 {
1248     ESPState *s = ESP(opaque);
1249 
1250     return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
1251            s->dma_enabled;
1252 }
1253 
/* Optional subsection: PDMA callback state, sent only when PDMA is in use */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1264 
/*
 * Migration schema for the ESP core. Versions 3/4 used flat mig_* buffers
 * with explicit pointers/lengths; version 5 switched to Fifo8 state
 * (converted in esp_post_load); version 6 added the lun field.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        /* Pre-v5 transfer buffer state, converted to fifo on load */
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /*
         * Pre-v5 command buffer, split in two spans: the first 16 bytes
         * exist since version 0, the remainder only since version 4 —
         * presumably the buffer grew at v4; verify against git history.
         */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        /* Version 5+: FIFO-based representation */
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_esp_pdma,
        NULL
    }
};
1304 
1305 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1306                                  uint64_t val, unsigned int size)
1307 {
1308     SysBusESPState *sysbus = opaque;
1309     ESPState *s = ESP(&sysbus->esp);
1310     uint32_t saddr;
1311 
1312     saddr = addr >> sysbus->it_shift;
1313     esp_reg_write(s, saddr, val);
1314 }
1315 
1316 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1317                                     unsigned int size)
1318 {
1319     SysBusESPState *sysbus = opaque;
1320     ESPState *s = ESP(&sysbus->esp);
1321     uint32_t saddr;
1322 
1323     saddr = addr >> sysbus->it_shift;
1324     return esp_reg_read(s, saddr);
1325 }
1326 
/* MMIO ops for the register bank; accesses gated by esp_mem_accepts() */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1333 
1334 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1335                                   uint64_t val, unsigned int size)
1336 {
1337     SysBusESPState *sysbus = opaque;
1338     ESPState *s = ESP(&sysbus->esp);
1339 
1340     trace_esp_pdma_write(size);
1341 
1342     switch (size) {
1343     case 1:
1344         esp_pdma_write(s, val);
1345         break;
1346     case 2:
1347         esp_pdma_write(s, val >> 8);
1348         esp_pdma_write(s, val);
1349         break;
1350     }
1351     esp_pdma_cb(s);
1352 }
1353 
1354 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1355                                      unsigned int size)
1356 {
1357     SysBusESPState *sysbus = opaque;
1358     ESPState *s = ESP(&sysbus->esp);
1359     uint64_t val = 0;
1360 
1361     trace_esp_pdma_read(size);
1362 
1363     switch (size) {
1364     case 1:
1365         val = esp_pdma_read(s);
1366         break;
1367     case 2:
1368         val = esp_pdma_read(s);
1369         val = (val << 8) | esp_pdma_read(s);
1370         break;
1371     }
1372     if (fifo8_num_used(&s->fifo) < 2) {
1373         esp_pdma_cb(s);
1374     }
1375     return val;
1376 }
1377 
1378 static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
1379 {
1380     ESPState *s = container_of(req->bus, ESPState, bus);
1381 
1382     scsi_req_ref(req);
1383     s->current_req = req;
1384     return s;
1385 }
1386 
/*
 * MMIO ops for the PDMA region: guests may issue 1-4 byte accesses,
 * but the implementation handles at most 2 bytes at a time (the core
 * splits wider accesses).
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1396 
/* SCSI bus callbacks wired to the ESP core handlers */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1407 
1408 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1409 {
1410     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1411     ESPState *s = ESP(&sysbus->esp);
1412 
1413     switch (irq) {
1414     case 0:
1415         parent_esp_reset(s, irq, level);
1416         break;
1417     case 1:
1418         esp_dma_enable(s, irq, level);
1419         break;
1420     }
1421 }
1422 
/*
 * Realize the sysbus wrapper: realize the embedded ESP core, then export
 * its IRQs, the register MMIO bank (scaled by it_shift) and the PDMA
 * region, the two GPIO inputs, and finally create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have configured the register stride before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1449 
1450 static void sysbus_esp_hard_reset(DeviceState *dev)
1451 {
1452     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1453     ESPState *s = ESP(&sysbus->esp);
1454 
1455     esp_hard_reset(s);
1456 }
1457 
1458 static void sysbus_esp_init(Object *obj)
1459 {
1460     SysBusESPState *sysbus = SYSBUS_ESP(obj);
1461 
1462     object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
1463 }
1464 
/*
 * Wrapper vmstate: version 2 adds the core's migration version byte
 * ahead of the embedded ESP state so the destination can interpret it.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1476 
1477 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
1478 {
1479     DeviceClass *dc = DEVICE_CLASS(klass);
1480 
1481     dc->realize = sysbus_esp_realize;
1482     dc->reset = sysbus_esp_hard_reset;
1483     dc->vmsd = &vmstate_sysbus_esp_scsi;
1484     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1485 }
1486 
/* QOM registration for the sysbus-attached ESP device */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1494 
1495 static void esp_finalize(Object *obj)
1496 {
1497     ESPState *s = ESP(obj);
1498 
1499     fifo8_destroy(&s->fifo);
1500     fifo8_destroy(&s->cmdfifo);
1501 }
1502 
1503 static void esp_init(Object *obj)
1504 {
1505     ESPState *s = ESP(obj);
1506 
1507     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1508     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1509 }
1510 
1511 static void esp_class_init(ObjectClass *klass, void *data)
1512 {
1513     DeviceClass *dc = DEVICE_CLASS(klass);
1514 
1515     /* internal device for sysbusesp/pciespscsi, not user-creatable */
1516     dc->user_creatable = false;
1517     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1518 }
1519 
/* QOM registration for the bare ESP core (embedded by the wrappers) */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
1528 
1529 static void esp_register_types(void)
1530 {
1531     type_register_static(&sysbus_esp_info);
1532     type_register_static(&esp_info);
1533 }
1534 
1535 type_init(esp_register_types)
1536