/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 * Copyright (c) 2023 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    if (!(s->drq_state)) {
        qemu_irq_raise(s->drq_irq);
        trace_esp_raise_drq();
        s->drq_state = true;
    }
}

static void esp_lower_drq(ESPState *s)
{
    if (s->drq_state) {
        qemu_irq_lower(s->drq_irq);
        trace_esp_lower_drq();
        s->drq_state = false;
    }
}

static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};

static void esp_set_phase(ESPState *s, uint8_t phase)
{
    s->rregs[ESP_RSTAT] &= ~7;
    s->rregs[ESP_RSTAT] |= phase;

    trace_esp_set_phase(esp_phase_names[phase]);
}

static uint8_t esp_get_phase(ESPState *s)
{
    return s->rregs[ESP_RSTAT] & 7;
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

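/*
 * Update the DRQ (DMA request) line: for DMA transfers DRQ is raised while
 * the FIFO can still make progress in the current transfer direction and
 * lowered otherwise (the 2-byte margin presumably accommodates the 16-bit
 * PDMA accesses below). Non-DMA transfers always lower DRQ.
 */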
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        to_device = true;
        break;

    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        to_device = false;
        break;

    default:
        return;
    }

    if (s->dma) {
        /* DMA request so update DRQ according to transfer direction */
        if (to_device) {
            if (fifo8_num_free(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        } else {
            if (fifo8_num_used(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        }
    } else {
        /* Not a DMA request */
        esp_lower_drq(s);
    }
}

static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
        trace_esp_error_fifo_overrun();
    } else {
        fifo8_push(&s->fifo, val);
    }

    esp_update_drq(s);
}

static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
{
    fifo8_push_all(&s->fifo, buf, len);
    esp_update_drq(s);
}

static uint8_t esp_fifo_pop(ESPState *s)
{
    uint8_t val;

    if (fifo8_is_empty(&s->fifo)) {
        val = 0;
    } else {
        val = fifo8_pop(&s->fifo);
    }

    esp_update_drq(s);
    return val;
}

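/*
 * Pop up to maxlen bytes from the given Fifo8 into dest (NULL simply
 * discards the data), handling any wraparound of the underlying ring
 * buffer. Returns the number of bytes actually popped.
 */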
static uint32_t esp_fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}

static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    uint32_t len = esp_fifo8_pop_buf(&s->fifo, dest, maxlen);

    esp_update_drq(s);
    return len;
}

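/*
 * The 24-bit transfer counter (TC) is spread across the TCLO/TCMID/TCHI
 * register triplet; esp_set_tc() also latches the STAT_TC status bit when
 * the counter transitions from non-zero to zero.
 */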
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

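/*
 * PDMA ("pseudo-DMA") transfers data through the FIFO under host CPU
 * control: reads pop bytes directly from the FIFO, writes push a byte and
 * count it against TC while DRQ is asserted.
 */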
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    val = esp_fifo_pop(s);
    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (dmalen && s->drq_state) {
        dmalen--;
        esp_set_tc(s, dmalen);
    }
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}

static void esp_do_dma(ESPState *s);
static void esp_do_nodma(ESPState *s);

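/*
 * Dequeue the accumulated CDB from cmdfifo, submit it to the SCSI layer
 * and switch to the DATA IN or DATA OUT phase according to the transfer
 * direction reported by scsi_req_enqueue().
 */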
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo8_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}

static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_pad(ESPState *s)
{
    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

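/*
 * Return true once cmdfifo holds enough bytes beyond cmdfifo_cdb_offset
 * for a complete CDB, as determined by scsi_cdb_length() on the opcode
 * byte.
 */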
static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_buf(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}

static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        esp_lower_drq(s);
    }
}

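/*
 * Advance a DMA transfer for the current SCSI bus phase, using the
 * dma_memory_read/dma_memory_write callbacks where real DMA is wired up
 * and falling back to the FIFO (PDMA) otherwise.
 */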
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            esp_raise_drq(s);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_raise_drq(s);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
                esp_raise_drq(s);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
                esp_raise_drq(s);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
                esp_lower_drq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}

static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

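/*
 * Advance a non-DMA (programmed I/O) transfer for the current SCSI bus
 * phase, moving data between the FIFO, cmdfifo and the SCSI layer as each
 * FIFO write or TI command arrives.
 */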
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contains entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available.  */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}

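/*
 * SCSIBusInfo .complete callback: latch the request status, switch to the
 * STATUS phase and raise the appropriate completion interrupts.
 */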
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    /* Ensure DRQ is set correctly for TC underflow or normal completion */
    esp_dma_ti_check(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS;
            s->rregs[ESP_RSEQ] = SEQ_MO;
            break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

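/*
 * Execute the command just written to ESP_CMD. DMA variants first reload
 * TC from the STC registers, with a zero STC interpreted as the maximum
 * transfer count of 0x10000.
 */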
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}

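/*
 * Guest register reads. Reading ESP_RINTR has the side effect of clearing
 * the interrupt register and all status bits except TC, while ESP_TCHI
 * returns the chip id until the register has first been written.
 */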
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5 && version_id <= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

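/*
 * Migration state. Stream versions before 5 used fixed ti_buf/cmdbuf
 * arrays which esp_post_load() converts into the Fifo8-based fifo and
 * cmdfifo; the esp_is_* tests select the fields matching the stream
 * version.
 */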
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

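/*
 * PDMA MMIO handlers: 16-bit accesses are split into two byte-wide FIFO
 * transfers before esp_do_dma() is called to process the data.
 */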
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_do_dma(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    esp_do_dma(s);
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info_types[] = {
    {
        .name          = TYPE_SYSBUS_ESP,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init    = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)