xref: /qemu/hw/scsi/esp.c (revision 1f46d1c3a530a9180b3bc202b99615c5a04938af)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  * Copyright (c) 2023 Mark Cave-Ayland
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  */
26 
27 #include "qemu/osdep.h"
28 #include "hw/sysbus.h"
29 #include "migration/vmstate.h"
30 #include "hw/irq.h"
31 #include "hw/scsi/esp.h"
32 #include "trace.h"
33 #include "qemu/log.h"
34 #include "qemu/module.h"
35 
36 /*
37  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
38  * also produced as NCR89C100. See
39  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
40  * and
41  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
42  *
43  * On Macintosh Quadra it is a NCR53C96.
44  */
45 
46 static void esp_raise_irq(ESPState *s)
47 {
48     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
49         s->rregs[ESP_RSTAT] |= STAT_INT;
50         qemu_irq_raise(s->irq);
51         trace_esp_raise_irq();
52     }
53 }
54 
55 static void esp_lower_irq(ESPState *s)
56 {
57     if (s->rregs[ESP_RSTAT] & STAT_INT) {
58         s->rregs[ESP_RSTAT] &= ~STAT_INT;
59         qemu_irq_lower(s->irq);
60         trace_esp_lower_irq();
61     }
62 }
63 
64 static void esp_raise_drq(ESPState *s)
65 {
66     if (!(s->drq_state)) {
67         qemu_irq_raise(s->drq_irq);
68         trace_esp_raise_drq();
69         s->drq_state = true;
70     }
71 }
72 
73 static void esp_lower_drq(ESPState *s)
74 {
75     if (s->drq_state) {
76         qemu_irq_lower(s->drq_irq);
77         trace_esp_lower_drq();
78         s->drq_state = false;
79     }
80 }
81 
82 void esp_dma_enable(ESPState *s, int irq, int level)
83 {
84     if (level) {
85         s->dma_enabled = 1;
86         trace_esp_dma_enable();
87         if (s->dma_cb) {
88             s->dma_cb(s);
89             s->dma_cb = NULL;
90         }
91     } else {
92         trace_esp_dma_disable();
93         s->dma_enabled = 0;
94     }
95 }
96 
97 void esp_request_cancelled(SCSIRequest *req)
98 {
99     ESPState *s = req->hba_private;
100 
101     if (req == s->current_req) {
102         scsi_req_unref(s->current_req);
103         s->current_req = NULL;
104         s->current_dev = NULL;
105         s->async_len = 0;
106     }
107 }
108 
109 static void esp_fifo_push(ESPState *s, uint8_t val)
110 {
111     if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
112         trace_esp_error_fifo_overrun();
113         return;
114     }
115 
116     fifo8_push(&s->fifo, val);
117 }
118 
119 static uint8_t esp_fifo_pop(ESPState *s)
120 {
121     if (fifo8_is_empty(&s->fifo)) {
122         return 0;
123     }
124 
125     return fifo8_pop(&s->fifo);
126 }
127 
/*
 * Pop up to @maxlen bytes from @fifo into @dest.  @dest may be NULL to
 * simply discard the bytes.  Returns the number of bytes actually popped,
 * which may be less than @maxlen if the FIFO holds fewer bytes.
 */
static uint32_t esp_fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    /*
     * fifo8_pop_buf() returns a pointer to a contiguous span, so a single
     * call may stop short at the internal ring-buffer wrap point and
     * return n < len bytes.
     */
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        /* Second pop picks up the bytes that wrapped to the buffer start */
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}
157 
158 static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
159 {
160     return esp_fifo8_pop_buf(fifo, dest, maxlen);
161 }
162 
163 static uint32_t esp_get_tc(ESPState *s)
164 {
165     uint32_t dmalen;
166 
167     dmalen = s->rregs[ESP_TCLO];
168     dmalen |= s->rregs[ESP_TCMID] << 8;
169     dmalen |= s->rregs[ESP_TCHI] << 16;
170 
171     return dmalen;
172 }
173 
174 static void esp_set_tc(ESPState *s, uint32_t dmalen)
175 {
176     uint32_t old_tc = esp_get_tc(s);
177 
178     s->rregs[ESP_TCLO] = dmalen;
179     s->rregs[ESP_TCMID] = dmalen >> 8;
180     s->rregs[ESP_TCHI] = dmalen >> 16;
181 
182     if (old_tc && dmalen == 0) {
183         s->rregs[ESP_RSTAT] |= STAT_TC;
184     }
185 }
186 
187 static uint32_t esp_get_stc(ESPState *s)
188 {
189     uint32_t dmalen;
190 
191     dmalen = s->wregs[ESP_TCLO];
192     dmalen |= s->wregs[ESP_TCMID] << 8;
193     dmalen |= s->wregs[ESP_TCHI] << 16;
194 
195     return dmalen;
196 }
197 
/* Human-readable names for the 3-bit SCSI bus phase field in ESP_RSTAT */
static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};
202 
203 static void esp_set_phase(ESPState *s, uint8_t phase)
204 {
205     s->rregs[ESP_RSTAT] &= ~7;
206     s->rregs[ESP_RSTAT] |= phase;
207 
208     trace_esp_set_phase(esp_phase_names[phase]);
209 }
210 
211 static uint8_t esp_get_phase(ESPState *s)
212 {
213     return s->rregs[ESP_RSTAT] & 7;
214 }
215 
216 static uint8_t esp_pdma_read(ESPState *s)
217 {
218     uint8_t val;
219 
220     val = esp_fifo_pop(s);
221     return val;
222 }
223 
224 static void esp_pdma_write(ESPState *s, uint8_t val)
225 {
226     uint32_t dmalen = esp_get_tc(s);
227 
228     if (dmalen == 0) {
229         return;
230     }
231 
232     esp_fifo_push(s, val);
233 
234     dmalen--;
235     esp_set_tc(s, dmalen);
236 }
237 
/*
 * Begin selection of the target whose ID is in ESP_WBUSID.  Returns 0 on
 * success with s->current_dev pointing at the device, or -1 (after raising
 * a disconnect interrupt) if no device exists at that target ID.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive: report disconnect to the guest */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}
267 
268 static void esp_do_dma(ESPState *s);
269 static void esp_do_nodma(ESPState *s);
270 
271 static void do_command_phase(ESPState *s)
272 {
273     uint32_t cmdlen;
274     int32_t datalen;
275     SCSIDevice *current_lun;
276     uint8_t buf[ESP_CMDFIFO_SZ];
277 
278     trace_esp_do_command_phase(s->lun);
279     cmdlen = fifo8_num_used(&s->cmdfifo);
280     if (!cmdlen || !s->current_dev) {
281         return;
282     }
283     esp_fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);
284 
285     current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
286     if (!current_lun) {
287         /* No such drive */
288         s->rregs[ESP_RSTAT] = 0;
289         s->rregs[ESP_RINTR] = INTR_DC;
290         s->rregs[ESP_RSEQ] = SEQ_0;
291         esp_raise_irq(s);
292         return;
293     }
294 
295     s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
296     datalen = scsi_req_enqueue(s->current_req);
297     s->ti_size = datalen;
298     fifo8_reset(&s->cmdfifo);
299     s->data_ready = false;
300     if (datalen != 0) {
301         /*
302          * Switch to DATA phase but wait until initial data xfer is
303          * complete before raising the command completion interrupt
304          */
305         if (datalen > 0) {
306             esp_set_phase(s, STAT_DI);
307         } else {
308             esp_set_phase(s, STAT_DO);
309         }
310         scsi_req_continue(s->current_req);
311         return;
312     }
313 }
314 
/*
 * Consume the MESSAGE OUT bytes that precede the CDB in cmdfifo.  The
 * first byte is treated as an IDENTIFY message, from which the LUN is
 * taken (low 3 bits); any remaining message bytes are discarded.  On
 * return cmdfifo_cdb_offset is 0 and cmdfifo starts at the CDB.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        /* Empty cmdfifo yields a message byte of 0 */
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo8_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
333 
/* Process the message-out byte(s), then dispatch the CDB from cmdfifo */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
340 
341 static void handle_satn(ESPState *s)
342 {
343     if (s->dma && !s->dma_enabled) {
344         s->dma_cb = handle_satn;
345         return;
346     }
347 
348     if (esp_select(s) < 0) {
349         return;
350     }
351 
352     esp_set_phase(s, STAT_MO);
353 
354     if (s->dma) {
355         esp_do_dma(s);
356     } else {
357         esp_do_nodma(s);
358     }
359 }
360 
361 static void handle_s_without_atn(ESPState *s)
362 {
363     if (s->dma && !s->dma_enabled) {
364         s->dma_cb = handle_s_without_atn;
365         return;
366     }
367 
368     if (esp_select(s) < 0) {
369         return;
370     }
371 
372     esp_set_phase(s, STAT_CD);
373     s->cmdfifo_cdb_offset = 0;
374 
375     if (s->dma) {
376         esp_do_dma(s);
377     } else {
378         esp_do_nodma(s);
379     }
380 }
381 
382 static void handle_satn_stop(ESPState *s)
383 {
384     if (s->dma && !s->dma_enabled) {
385         s->dma_cb = handle_satn_stop;
386         return;
387     }
388 
389     if (esp_select(s) < 0) {
390         return;
391     }
392 
393     esp_set_phase(s, STAT_MO);
394     s->cmdfifo_cdb_offset = 0;
395 
396     if (s->dma) {
397         esp_do_dma(s);
398     } else {
399         esp_do_nodma(s);
400     }
401 }
402 
403 static void handle_pad(ESPState *s)
404 {
405     if (s->dma) {
406         esp_do_dma(s);
407     } else {
408         esp_do_nodma(s);
409     }
410 }
411 
412 static void write_response(ESPState *s)
413 {
414     trace_esp_write_response(s->status);
415 
416     if (s->dma) {
417         esp_do_dma(s);
418     } else {
419         esp_do_nodma(s);
420     }
421 }
422 
/*
 * Return the expected CDB length for the opcode currently at
 * cmdfifo_cdb_offset in cmdfifo, or 0 if that byte has not arrived yet.
 * NOTE(review): fifo8_peek_buf() returns a contiguous span; if cmdfifo has
 * wrapped internally this may cover fewer than cmdlen bytes — confirm
 * against the Fifo8 contract.
 */
static int esp_cdb_length(ESPState *s)
{
    const uint8_t *pbuf;
    int cmdlen, len;

    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (cmdlen < s->cmdfifo_cdb_offset) {
        return 0;
    }

    pbuf = fifo8_peek_buf(&s->cmdfifo, cmdlen, NULL);
    len = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return len;
}
438 
/*
 * If the transfer counter has expired and at most one residual byte sits
 * in the FIFO, raise the bus service interrupt and drop DRQ to tell the
 * external DMA controller that the transfer has finished.
 */
static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        esp_lower_drq(s);
    }
}
447 
/*
 * DMA transfer engine: move up to TC bytes between the DMA source/sink and
 * the current SCSI phase.  Real DMA uses the dma_memory_read/write
 * callbacks; otherwise PDMA is emulated through the FIFO with DRQ
 * handshaking.  Dispatches on the current bus phase, then on the command
 * in ESP_CMD where the behaviour differs per command.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: accumulate bytes from the initiator into cmdfifo */
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            /* PDMA: drain the FIFO and ask for more via DRQ */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            esp_raise_drq(s);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: accumulate CDB bytes; execute when TC reaches zero */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_raise_drq(s);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        /* DATA OUT: move bytes from the DMA source into the SCSI request */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
                esp_raise_drq(s);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        /* DATA IN: move bytes from the SCSI request to the DMA sink */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                fifo8_push_all(&s->fifo, s->async_buf, len);
                esp_raise_drq(s);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        /* STATUS: ICCS sends the status byte, then moves to MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    fifo8_push_all(&s->fifo, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
                esp_lower_drq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: ICCS sends a zero message byte and completes */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    fifo8_push_all(&s->fifo, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}
702 
/*
 * Non-DMA TI in the DATA OUT phase: drain FIFO bytes accumulated by the
 * guest into the current SCSI request buffer.  Completes the request via
 * scsi_req_continue() when the buffer fills; otherwise raises a bus
 * service interrupt so the guest refills the FIFO.
 */
static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        /* Buffer exhausted: hand control back to the SCSI layer */
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
729 
/*
 * Non-DMA transfer engine: the guest moves bytes one at a time through the
 * FIFO registers.  Dispatches on the current SCSI bus phase, then on the
 * command in ESP_CMD where behaviour differs per command.
 */
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: gather identify/message bytes into cmdfifo */
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, 1);
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: gather CDB bytes; execute once the CDB is complete */
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_length(s) && esp_cdb_length(s) ==
                fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset) {
                    /* Command has been received */
                    do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_length(s) && esp_cdb_length(s) ==
                fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset) {
                    /* Command has been received */
                    do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contain entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        /* DATA IN: feed the guest one byte at a time through the FIFO */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available.  */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            /* Request buffer exhausted: let the SCSI layer continue */
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        /* STATUS: ICCS pushes the status byte and advances to MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: ICCS pushes a zero message byte and completes */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}
906 
/*
 * SCSI layer callback invoked when the current request finishes.  Records
 * the completion status, switches the bus to the STATUS phase, raises the
 * appropriate interrupts for the command that was in flight, and releases
 * the request.  The resid argument is unused here.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    /* Ensure DRQ is set correctly for TC underflow or normal completion */
    esp_dma_ti_check(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
969 
/*
 * SCSI layer callback invoked when request data becomes available (or the
 * device is ready to accept data).  On the first call for a request it
 * raises the interrupt that was deferred when the command was issued; it
 * then resumes any in-flight TI transfer in the correct DMA mode.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        /* First data callback for this request: raise deferred interrupt */
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
             s->rregs[ESP_RSEQ] = SEQ_CD;
             break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS;
             s->rregs[ESP_RSEQ] = SEQ_MO;
             break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}
1036 
/*
 * TRANSFER INFORMATION command: run the transfer engine for the current
 * phase.  If DMA was requested but the DMA engine is not yet enabled,
 * defer the whole command until esp_dma_enable() fires the callback.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        /* Non-DMA DATA OUT additionally drains the FIFO into the request */
        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
    }
}
1059 
1060 void esp_hard_reset(ESPState *s)
1061 {
1062     memset(s->rregs, 0, ESP_REGS);
1063     memset(s->wregs, 0, ESP_REGS);
1064     s->tchi_written = 0;
1065     s->ti_size = 0;
1066     s->async_len = 0;
1067     fifo8_reset(&s->fifo);
1068     fifo8_reset(&s->cmdfifo);
1069     s->dma = 0;
1070     s->dma_cb = NULL;
1071 
1072     s->rregs[ESP_CFG1] = 7;
1073 }
1074 
/* Soft reset: deassert both the interrupt and DRQ lines, then reset the chip */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}
1081 
/* Reset every device attached to the emulated SCSI bus */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
1086 
1087 static void parent_esp_reset(ESPState *s, int irq, int level)
1088 {
1089     if (level) {
1090         esp_soft_reset(s);
1091     }
1092 }
1093 
/*
 * Decode and execute the command latched in the ESP_CMD register.
 * When the DMA bit is set the transfer counter is reloaded from the
 * start transfer count registers before the command is dispatched.
 */
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            /* A start count of zero selects the maximum 0x10000 bytes */
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        /* Discard any bytes pending in the data FIFO */
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        /* Raise the reset interrupt unless reporting is masked in CFG1 */
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        /* Initiator command complete sequence: return status + message */
        write_response(s);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        /* Message accepted: flag disconnect and clear sequence/FIFO flags */
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
1180 
/*
 * Read an ESP register. Some reads have side effects: reading the FIFO
 * register pops one byte, and reading RINTR clears the interrupt and
 * most status bits.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        /* Each read consumes one byte from the FIFO */
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
     case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
1229 
/*
 * Write an ESP register. Accepted writes are mirrored into wregs[] at
 * the end; FIFO and command writes additionally trigger immediate
 * processing.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Loading the transfer counter clears the TC status bit */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        /* A new FIFO byte may let a pending non-DMA transfer progress */
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Configuration registers are readable back through rregs */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    /* Remember the last value written to every accepted register */
    s->wregs[saddr] = val;
}
1266 
1267 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1268                             unsigned size, bool is_write,
1269                             MemTxAttrs attrs)
1270 {
1271     return (size == 1) || (is_write && size == 4);
1272 }
1273 
1274 static bool esp_is_before_version_5(void *opaque, int version_id)
1275 {
1276     ESPState *s = ESP(opaque);
1277 
1278     version_id = MIN(version_id, s->mig_version_id);
1279     return version_id < 5;
1280 }
1281 
1282 static bool esp_is_version_5(void *opaque, int version_id)
1283 {
1284     ESPState *s = ESP(opaque);
1285 
1286     version_id = MIN(version_id, s->mig_version_id);
1287     return version_id >= 5;
1288 }
1289 
1290 static bool esp_is_version_6(void *opaque, int version_id)
1291 {
1292     ESPState *s = ESP(opaque);
1293 
1294     version_id = MIN(version_id, s->mig_version_id);
1295     return version_id >= 6;
1296 }
1297 
1298 static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
1299 {
1300     ESPState *s = ESP(opaque);
1301 
1302     version_id = MIN(version_id, s->mig_version_id);
1303     return version_id >= 5 && version_id <= 6;
1304 }
1305 
/*
 * pre_save handler invoked on the parent device wrapper: resolve the
 * embedded "esp" child and record the current vmstate_esp version so
 * the esp_is_* predicates can clamp against it on load.
 */
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1314 
1315 static int esp_post_load(void *opaque, int version_id)
1316 {
1317     ESPState *s = ESP(opaque);
1318     int len, i;
1319 
1320     version_id = MIN(version_id, s->mig_version_id);
1321 
1322     if (version_id < 5) {
1323         esp_set_tc(s, s->mig_dma_left);
1324 
1325         /* Migrate ti_buf to fifo */
1326         len = s->mig_ti_wptr - s->mig_ti_rptr;
1327         for (i = 0; i < len; i++) {
1328             fifo8_push(&s->fifo, s->mig_ti_buf[i]);
1329         }
1330 
1331         /* Migrate cmdbuf to cmdfifo */
1332         for (i = 0; i < s->mig_cmdlen; i++) {
1333             fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
1334         }
1335     }
1336 
1337     s->mig_version_id = vmstate_esp.version_id;
1338     return 0;
1339 }
1340 
/*
 * Migration description for the core ESPState. Field order is the wire
 * format: do not reorder. The esp_is_* test functions gate fields so
 * streams from older and newer versions remain loadable; the mig_*
 * fields carry pre-version-5 state that esp_post_load converts into
 * the current FIFO representation.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /* mig_cmdbuf is split in two so version 3 streams (16 bytes) load */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};
1378 
1379 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1380                                  uint64_t val, unsigned int size)
1381 {
1382     SysBusESPState *sysbus = opaque;
1383     ESPState *s = ESP(&sysbus->esp);
1384     uint32_t saddr;
1385 
1386     saddr = addr >> sysbus->it_shift;
1387     esp_reg_write(s, saddr, val);
1388 }
1389 
1390 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1391                                     unsigned int size)
1392 {
1393     SysBusESPState *sysbus = opaque;
1394     ESPState *s = ESP(&sysbus->esp);
1395     uint32_t saddr;
1396 
1397     saddr = addr >> sysbus->it_shift;
1398     return esp_reg_read(s, saddr);
1399 }
1400 
/* Register bank MMIO region; access sizes filtered by esp_mem_accepts */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1407 
1408 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1409                                   uint64_t val, unsigned int size)
1410 {
1411     SysBusESPState *sysbus = opaque;
1412     ESPState *s = ESP(&sysbus->esp);
1413 
1414     trace_esp_pdma_write(size);
1415 
1416     switch (size) {
1417     case 1:
1418         esp_pdma_write(s, val);
1419         break;
1420     case 2:
1421         esp_pdma_write(s, val >> 8);
1422         esp_pdma_write(s, val);
1423         break;
1424     }
1425     esp_do_dma(s);
1426 }
1427 
1428 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1429                                      unsigned int size)
1430 {
1431     SysBusESPState *sysbus = opaque;
1432     ESPState *s = ESP(&sysbus->esp);
1433     uint64_t val = 0;
1434 
1435     trace_esp_pdma_read(size);
1436 
1437     switch (size) {
1438     case 1:
1439         val = esp_pdma_read(s);
1440         break;
1441     case 2:
1442         val = esp_pdma_read(s);
1443         val = (val << 8) | esp_pdma_read(s);
1444         break;
1445     }
1446     esp_do_dma(s);
1447     return val;
1448 }
1449 
/*
 * SCSIBusInfo load_request hook: re-attach a SCSI request restored by
 * migration to the owning ESP state, taking a reference on it.
 */
static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}
1458 
/*
 * Pseudo-DMA MMIO region: guest accesses of up to 4 bytes are valid
 * but are implemented as at most 16-bit operations, which the handlers
 * split into single FIFO byte transfers.
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1468 
/* Callbacks through which the SCSI bus layer drives the ESP */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1479 
1480 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1481 {
1482     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1483     ESPState *s = ESP(&sysbus->esp);
1484 
1485     switch (irq) {
1486     case 0:
1487         parent_esp_reset(s, irq, level);
1488         break;
1489     case 1:
1490         esp_dma_enable(s, irq, level);
1491         break;
1492     }
1493 }
1494 
/*
 * Realize the sysbus wrapper: realize the embedded ESP child, export
 * its IRQ/DRQ lines, map the register and PDMA MMIO regions, wire the
 * two GPIO inputs and create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    /* The board must have configured the register spacing */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* Register bank size scales with the configured register spacing */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1521 
/* DeviceClass reset handler: forward to the embedded ESP core reset */
static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}
1529 
/* Instance init: create the embedded TYPE_ESP child named "esp" */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
1536 
/*
 * Migration description for the sysbus wrapper: saves the vmstate_esp
 * version (since v2) followed by the embedded ESP core state.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1548 
/* Class init for TYPE_SYSBUS_ESP: wire realize, reset and migration */
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1558 
/* Instance finalize: release the FIFO storage created in esp_init */
static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}
1566 
/* Instance init: allocate the data and command FIFOs */
static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}
1574 
/* Class init for TYPE_ESP: mark the core device as internal-only */
static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1583 
/*
 * QOM type registration: TYPE_SYSBUS_ESP is the board-visible sysbus
 * device, which embeds a TYPE_ESP child carrying the chip emulation.
 */
static const TypeInfo esp_info_types[] = {
    {
        .name          = TYPE_SYSBUS_ESP,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init    = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)
1603