xref: /qemu/hw/scsi/esp.c (revision 6fcae98b2c3923888c238e1ba49b10f4f681ae32)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On Macintosh Quadra it is a NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
/* Assert the DRQ (DMA request) line used for pseudo-DMA transfers */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}
68 
/* Deassert the DRQ (DMA request) line used for pseudo-DMA transfers */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}
74 
75 void esp_dma_enable(ESPState *s, int irq, int level)
76 {
77     if (level) {
78         s->dma_enabled = 1;
79         trace_esp_dma_enable();
80         if (s->dma_cb) {
81             s->dma_cb(s);
82             s->dma_cb = NULL;
83         }
84     } else {
85         trace_esp_dma_disable();
86         s->dma_enabled = 0;
87     }
88 }
89 
90 void esp_request_cancelled(SCSIRequest *req)
91 {
92     ESPState *s = req->hba_private;
93 
94     if (req == s->current_req) {
95         scsi_req_unref(s->current_req);
96         s->current_req = NULL;
97         s->current_dev = NULL;
98         s->async_len = 0;
99     }
100 }
101 
102 static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
103 {
104     if (fifo8_num_used(fifo) == fifo->capacity) {
105         trace_esp_error_fifo_overrun();
106         return;
107     }
108 
109     fifo8_push(fifo, val);
110 }
111 
112 static uint8_t esp_fifo_pop(Fifo8 *fifo)
113 {
114     if (fifo8_is_empty(fifo)) {
115         return 0;
116     }
117 
118     return fifo8_pop(fifo);
119 }
120 
/*
 * Pop up to maxlen bytes from the FIFO into dest (dest may be NULL to
 * discard the bytes). Returns the number of bytes actually removed,
 * which may be less than maxlen if the FIFO runs dry.
 */
static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    /*
     * fifo8_pop_buf() returns a pointer to a contiguous region; n may be
     * smaller than len if the data wraps around the end of the ring.
     */
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        /* Second pop picks up the remainder from the start of the ring */
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}
150 
151 static uint32_t esp_get_tc(ESPState *s)
152 {
153     uint32_t dmalen;
154 
155     dmalen = s->rregs[ESP_TCLO];
156     dmalen |= s->rregs[ESP_TCMID] << 8;
157     dmalen |= s->rregs[ESP_TCHI] << 16;
158 
159     return dmalen;
160 }
161 
162 static void esp_set_tc(ESPState *s, uint32_t dmalen)
163 {
164     s->rregs[ESP_TCLO] = dmalen;
165     s->rregs[ESP_TCMID] = dmalen >> 8;
166     s->rregs[ESP_TCHI] = dmalen >> 16;
167 }
168 
169 static uint32_t esp_get_stc(ESPState *s)
170 {
171     uint32_t dmalen;
172 
173     dmalen = s->wregs[ESP_TCLO];
174     dmalen |= s->wregs[ESP_TCMID] << 8;
175     dmalen |= s->wregs[ESP_TCHI] << 16;
176 
177     return dmalen;
178 }
179 
180 static uint8_t esp_pdma_read(ESPState *s)
181 {
182     uint8_t val;
183 
184     if (s->do_cmd) {
185         val = esp_fifo_pop(&s->cmdfifo);
186     } else {
187         val = esp_fifo_pop(&s->fifo);
188     }
189 
190     return val;
191 }
192 
193 static void esp_pdma_write(ESPState *s, uint8_t val)
194 {
195     uint32_t dmalen = esp_get_tc(s);
196 
197     if (dmalen == 0) {
198         return;
199     }
200 
201     if (s->do_cmd) {
202         esp_fifo_push(&s->cmdfifo, val);
203     } else {
204         esp_fifo_push(&s->fifo, val);
205     }
206 
207     dmalen--;
208     esp_set_tc(s, dmalen);
209 }
210 
/* Record which PDMA completion handler esp_pdma_cb() should dispatch to */
static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
{
    s->pdma_cb = cb;
}
215 
/*
 * Begin selection of the target addressed by the bus ID register.
 * Returns 0 on success, -1 (after raising a disconnect interrupt) if no
 * device is present at that target ID.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
248 
/*
 * Gather up to maxlen command bytes into cmdfifo, either via true DMA,
 * PDMA (deferred to the PDMA callback) or from the data FIFO, and start
 * target selection. Returns the number of bytes collected, 0 if no bytes
 * are available yet (PDMA pending), or -1 if selection failed.
 */
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            /* True DMA: pull the bytes from memory straight into cmdfifo */
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            /* PDMA: select now, then wait for the host to feed us bytes */
            if (esp_select(s) < 0) {
                return -1;
            }
            esp_raise_drq(s);
            return 0;
        }
    } else {
        /* Non-DMA: command bytes were already written into the data FIFO */
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        return -1;
    }
    return dmalen;
}
288 
/*
 * Submit the CDB accumulated in cmdfifo to the SCSI layer and switch to
 * the appropriate data phase based on the expected transfer direction.
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    /* datalen > 0: DATA IN expected; < 0: DATA OUT; 0: no data phase */
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
339 
/*
 * Consume the message out bytes preceding the CDB in cmdfifo: the first
 * byte is the IDENTIFY message carrying the LUN; any further message
 * bytes are discarded (extended messages are not implemented).
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        /* Low 3 bits of the IDENTIFY message select the LUN */
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
357 
/* Process cmdfifo contents: message out phase followed by command phase */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    /* All message out bytes must have been consumed before the CDB */
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
364 
365 static void satn_pdma_cb(ESPState *s)
366 {
367     if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
368         s->cmdfifo_cdb_offset = 1;
369         s->do_cmd = 0;
370         do_cmd(s);
371     }
372 }
373 
/*
 * SELECT WITH ATN command: collect IDENTIFY message plus CDB and run the
 * command. Deferred via dma_cb if DMA is currently gated off.
 */
static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    esp_set_pdma_cb(s, SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* One IDENTIFY message byte precedes the CDB */
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
395 
396 static void s_without_satn_pdma_cb(ESPState *s)
397 {
398     if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
399         s->cmdfifo_cdb_offset = 0;
400         s->do_cmd = 0;
401         do_cmd(s);
402     }
403 }
404 
/*
 * SELECT WITHOUT ATN command: collect the CDB (no preceding message
 * byte) and run the command. Deferred via dma_cb if DMA is gated off.
 */
static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
426 
/*
 * PDMA completion for SELECT WITH ATN AND STOP: the message byte has been
 * received, so stop after message out phase and interrupt; the CDB is
 * transferred later by a TI command.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
439 
/*
 * SELECT WITH ATN AND STOP command: transfer only the single IDENTIFY
 * message byte, then stop in message out phase and interrupt. Deferred
 * via dma_cb if DMA is gated off.
 */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
    /* Only one byte (the IDENTIFY message) is transferred by this command */
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}
465 
/*
 * PDMA completion for ICCS: the status/message pair has been read by the
 * host, so report transfer complete in status phase and interrupt.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
473 
474 static void write_response(ESPState *s)
475 {
476     uint8_t buf[2];
477 
478     trace_esp_write_response(s->status);
479 
480     buf[0] = s->status;
481     buf[1] = 0;
482 
483     if (s->dma) {
484         if (s->dma_memory_write) {
485             s->dma_memory_write(s->dma_opaque, buf, 2);
486             s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
487             s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
488             s->rregs[ESP_RSEQ] = SEQ_CD;
489         } else {
490             esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
491             esp_raise_drq(s);
492             return;
493         }
494     } else {
495         fifo8_reset(&s->fifo);
496         fifo8_push_all(&s->fifo, buf, 2);
497         s->rregs[ESP_RFLAGS] = 2;
498     }
499     esp_raise_irq(s);
500 }
501 
/* Signal DMA transfer completion: TC set, counter and FIFO flags cleared */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
510 
/*
 * PDMA completion handler for TI-driven transfers: finishes command-phase
 * byte collection, or shuttles data between the FIFO and the async SCSI
 * buffer in the direction given by the current bus phase.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    /* DATA OUT phase means the initiator is writing to the device */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            /* SCSI buffer filled: let the SCSI layer continue the request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            /* Host finished its transfer: complete the DMA operation */
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately.  */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
610 
/*
 * Execute a (true or pseudo) DMA transfer for the current TI command:
 * command-phase bytes are gathered into cmdfifo, data-phase bytes move
 * between guest memory (or the FIFO for PDMA) and the async SCSI buffer.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    /* DATA OUT phase means the initiator is writing to the device */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd set (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            /* PDMA: wait for the host to push the command bytes */
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            /* PDMA: data arrives via the FIFO in do_dma_pdma_cb() */
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
    esp_lower_drq(s);
}
736 
/*
 * Execute a non-DMA (FIFO programmed I/O) transfer for the current TI
 * command: command bytes complete the command phase, data bytes move
 * between the FIFO and the async SCSI buffer one step at a time.
 */
static void esp_do_nodma(ESPState *s)
{
    /* DATA OUT phase means the initiator is writing to the device */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }

    if (to_device) {
        /* Drain as much of the FIFO as the SCSI buffer can hold */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        /* Device-to-host: hand over one byte at a time through the FIFO */
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        /* SCSI buffer consumed: let the SCSI layer continue the request */
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
803 
804 static void esp_pdma_cb(ESPState *s)
805 {
806     switch (s->pdma_cb) {
807     case SATN_PDMA_CB:
808         satn_pdma_cb(s);
809         break;
810     case S_WITHOUT_SATN_PDMA_CB:
811         s_without_satn_pdma_cb(s);
812         break;
813     case SATN_STOP_PDMA_CB:
814         satn_stop_pdma_cb(s);
815         break;
816     case WRITE_RESPONSE_PDMA_CB:
817         write_response_pdma_cb(s);
818         break;
819     case DO_DMA_PDMA_CB:
820         do_dma_pdma_cb(s);
821         break;
822     default:
823         g_assert_not_reached();
824     }
825 }
826 
/*
 * SCSI layer callback: the current request has finished. Record its
 * status, switch to status phase when the transfer is done, and drop our
 * reference to the request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
867 
/*
 * SCSI layer callback: a buffer of len bytes is ready for transfer.
 * Raises the deferred command-completion interrupt for the first DATA IN
 * buffer and continues any in-flight TI command.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}
916 
917 static void handle_ti(ESPState *s)
918 {
919     uint32_t dmalen;
920 
921     if (s->dma && !s->dma_enabled) {
922         s->dma_cb = handle_ti;
923         return;
924     }
925 
926     s->ti_cmd = s->rregs[ESP_CMD];
927     if (s->dma) {
928         dmalen = esp_get_tc(s);
929         trace_esp_handle_ti(dmalen);
930         s->rregs[ESP_RSTAT] &= ~STAT_TC;
931         esp_do_dma(s);
932     } else {
933         trace_esp_handle_ti(s->ti_size);
934         esp_do_nodma(s);
935     }
936 }
937 
938 void esp_hard_reset(ESPState *s)
939 {
940     memset(s->rregs, 0, ESP_REGS);
941     memset(s->wregs, 0, ESP_REGS);
942     s->tchi_written = 0;
943     s->ti_size = 0;
944     s->async_len = 0;
945     fifo8_reset(&s->fifo);
946     fifo8_reset(&s->cmdfifo);
947     s->dma = 0;
948     s->do_cmd = 0;
949     s->dma_cb = NULL;
950 
951     s->rregs[ESP_CFG1] = 7;
952 }
953 
/* Soft reset: drop both output lines, then reset the chip state */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
960 
/* Propagate a SCSI bus reset to all devices on the bus */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
965 
966 static void parent_esp_reset(ESPState *s, int irq, int level)
967 {
968     if (level) {
969         esp_soft_reset(s);
970     }
971 }
972 
/*
 * Guest read of an ESP register. FIFO and RINTR reads have side effects
 * (FIFO pop, interrupt acknowledge); other registers read back directly.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
                (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    /* Refill the FIFO from the in-flight transfer */
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
1039 
/*
 * Handle a guest write to an ESP register.
 *
 * @s: ESP controller state
 * @saddr: register index (ESP_* register defines)
 * @val: value written by the guest
 *
 * On success the written value is latched into s->wregs[saddr] at the end
 * of the function; writes to invalid register indices are traced and
 * discarded without latching.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        /*
         * Record that TCHI has been written: until then, reads of TCHI
         * return the unique chip id instead of the latched value
         */
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing any byte of the transfer counter clears the TC status */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Command phase in progress: bytes accumulate in cmdfifo */
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            if (esp_get_stc(s) == 0) {
                /* An STC of zero selects the maximum count of 0x10000 */
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        /* Dispatch on the command code (low bits, DMA bit masked off) */
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            /* Soft reset of the chip itself, not the SCSI bus */
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            esp_bus_reset(s);
            /* Only interrupt if reset-interrupt reporting is not disabled */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            /* Transfer information: move data for the current SCSI phase */
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            /* Initiator command complete: send status + message bytes */
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            /* Message accepted: report disconnect and clear sequence state */
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            /* Transfer pad: complete immediately with TC set */
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            /* Select with ATN and stop sequence */
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        /* Write-only config registers: latched below, no side effects here */
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* These config registers read back the written value */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    /* Latch the value so subsequent writes can trace the previous one */
    s->wregs[saddr] = val;
}
1170 
1171 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1172                             unsigned size, bool is_write,
1173                             MemTxAttrs attrs)
1174 {
1175     return (size == 1) || (is_write && size == 4);
1176 }
1177 
1178 static bool esp_is_before_version_5(void *opaque, int version_id)
1179 {
1180     ESPState *s = ESP(opaque);
1181 
1182     version_id = MIN(version_id, s->mig_version_id);
1183     return version_id < 5;
1184 }
1185 
1186 static bool esp_is_version_5(void *opaque, int version_id)
1187 {
1188     ESPState *s = ESP(opaque);
1189 
1190     version_id = MIN(version_id, s->mig_version_id);
1191     return version_id >= 5;
1192 }
1193 
1194 static bool esp_is_version_6(void *opaque, int version_id)
1195 {
1196     ESPState *s = ESP(opaque);
1197 
1198     version_id = MIN(version_id, s->mig_version_id);
1199     return version_id >= 6;
1200 }
1201 
1202 int esp_pre_save(void *opaque)
1203 {
1204     ESPState *s = ESP(object_resolve_path_component(
1205                       OBJECT(opaque), "esp"));
1206 
1207     s->mig_version_id = vmstate_esp.version_id;
1208     return 0;
1209 }
1210 
/*
 * post_load hook: convert state from pre-version-5 migration streams into
 * the current representation (FIFOs instead of flat buffers/pointers).
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    /* Clamp to the version recorded by the source in esp_pre_save() */
    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        /* Old streams carried the transfer counter separately */
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1236 
1237 /*
1238  * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
1239  * guest CPU to perform the transfers between the SCSI bus and memory
1240  * itself. This is indicated by the dma_memory_read and dma_memory_write
1241  * functions being NULL (in contrast to the ESP PCI device) whilst
1242  * dma_enabled is still set.
1243  */
1244 
1245 static bool esp_pdma_needed(void *opaque)
1246 {
1247     ESPState *s = ESP(opaque);
1248 
1249     return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
1250            s->dma_enabled;
1251 }
1252 
/* Migration subsection for pseudo-DMA state (Macintosh machines only) */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1263 
/*
 * Main ESP migration state. mig_* fields exist only for compatibility
 * with pre-version-5 streams and are converted in esp_post_load().
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /*
         * Two entries for mig_cmdbuf: the first 16 bytes were present from
         * version 0, the remainder only from version 4 onwards —
         * presumably the buffer grew at version 4 (TODO confirm against
         * migration history)
         */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_esp_pdma,
        NULL
    }
};
1303 
1304 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1305                                  uint64_t val, unsigned int size)
1306 {
1307     SysBusESPState *sysbus = opaque;
1308     ESPState *s = ESP(&sysbus->esp);
1309     uint32_t saddr;
1310 
1311     saddr = addr >> sysbus->it_shift;
1312     esp_reg_write(s, saddr, val);
1313 }
1314 
1315 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1316                                     unsigned int size)
1317 {
1318     SysBusESPState *sysbus = opaque;
1319     ESPState *s = ESP(&sysbus->esp);
1320     uint32_t saddr;
1321 
1322     saddr = addr >> sysbus->it_shift;
1323     return esp_reg_read(s, saddr);
1324 }
1325 
/* Memory region ops for the ESP register bank */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1332 
/*
 * PDMA (pseudo-DMA) write: the guest CPU pushes data bytes directly.
 * A 16-bit access delivers the high byte first; the PDMA callback is
 * invoked after every write to advance the transfer state machine.
 * (Accesses wider than 2 bytes are split by the memory core via
 * impl.max_access_size below.)
 */
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_pdma_cb(s);
}
1352 
/*
 * PDMA (pseudo-DMA) read: the guest CPU pulls data bytes directly.
 * A 16-bit access returns the first byte in the high half. The PDMA
 * callback is only run once fewer than 2 bytes remain in the FIFO,
 * so a following 16-bit read cannot be starved mid-access.
 */
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        esp_pdma_cb(s);
    }
    return val;
}
1376 
1377 static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
1378 {
1379     ESPState *s = container_of(req->bus, ESPState, bus);
1380 
1381     scsi_req_ref(req);
1382     s->current_req = req;
1383     return s;
1384 }
1385 
/*
 * Memory region ops for the PDMA port: guest accesses of up to 4 bytes
 * are accepted but implemented as at most 2-byte operations (wider
 * accesses are split by the memory core).
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1395 
/* SCSI bus callbacks wiring the generic SCSI layer to the ESP core */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1406 
1407 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1408 {
1409     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1410     ESPState *s = ESP(&sysbus->esp);
1411 
1412     switch (irq) {
1413     case 0:
1414         parent_esp_reset(s, irq, level);
1415         break;
1416     case 1:
1417         esp_dma_enable(s, irq, level);
1418         break;
1419     }
1420 }
1421 
/*
 * Realize the sysbus wrapper: realize the embedded ESP core first, then
 * export its IRQs, the register bank MMIO region and the PDMA port, and
 * initialise the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have configured the register stride by now */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* Register bank size scales with the register stride */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    /* Two GPIO inputs: 0 = reset, 1 = DMA enable (see gpio_demux) */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1448 
1449 static void sysbus_esp_hard_reset(DeviceState *dev)
1450 {
1451     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1452     ESPState *s = ESP(&sysbus->esp);
1453 
1454     esp_hard_reset(s);
1455 }
1456 
1457 static void sysbus_esp_init(Object *obj)
1458 {
1459     SysBusESPState *sysbus = SYSBUS_ESP(obj);
1460 
1461     object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
1462 }
1463 
/*
 * Migration state for the sysbus wrapper: saves the ESP vmstate version
 * (from version 2 onwards) followed by the embedded ESP core state.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1475 
1476 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
1477 {
1478     DeviceClass *dc = DEVICE_CLASS(klass);
1479 
1480     dc->realize = sysbus_esp_realize;
1481     dc->reset = sysbus_esp_hard_reset;
1482     dc->vmsd = &vmstate_sysbus_esp_scsi;
1483     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1484 }
1485 
/* QOM type registration info for the sysbus ESP wrapper */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1493 
1494 static void esp_finalize(Object *obj)
1495 {
1496     ESPState *s = ESP(obj);
1497 
1498     fifo8_destroy(&s->fifo);
1499     fifo8_destroy(&s->cmdfifo);
1500 }
1501 
1502 static void esp_init(Object *obj)
1503 {
1504     ESPState *s = ESP(obj);
1505 
1506     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1507     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1508 }
1509 
1510 static void esp_class_init(ObjectClass *klass, void *data)
1511 {
1512     DeviceClass *dc = DEVICE_CLASS(klass);
1513 
1514     /* internal device for sysbusesp/pciespscsi, not user-creatable */
1515     dc->user_creatable = false;
1516     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1517 }
1518 
/* QOM type registration info for the internal ESP core device */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
1527 
/* Register both the sysbus wrapper and the internal ESP core types */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)
1535