xref: /qemu/hw/scsi/esp.c (revision c5fef9112b15c4b5494791cdf8bbb40bc1938dd3)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On Macintosh Quadra it is a NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
/* Assert the DMA request line towards the DMA controller (PDMA mode) */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}
68 
/* Deassert the DMA request line towards the DMA controller (PDMA mode) */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}
74 
75 void esp_dma_enable(ESPState *s, int irq, int level)
76 {
77     if (level) {
78         s->dma_enabled = 1;
79         trace_esp_dma_enable();
80         if (s->dma_cb) {
81             s->dma_cb(s);
82             s->dma_cb = NULL;
83         }
84     } else {
85         trace_esp_dma_disable();
86         s->dma_enabled = 0;
87     }
88 }
89 
90 void esp_request_cancelled(SCSIRequest *req)
91 {
92     ESPState *s = req->hba_private;
93 
94     if (req == s->current_req) {
95         scsi_req_unref(s->current_req);
96         s->current_req = NULL;
97         s->current_dev = NULL;
98     }
99 }
100 
101 static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
102 {
103     if (fifo8_num_used(fifo) == fifo->capacity) {
104         trace_esp_error_fifo_overrun();
105         return;
106     }
107 
108     fifo8_push(fifo, val);
109 }
110 
111 static uint8_t esp_fifo_pop(Fifo8 *fifo)
112 {
113     if (fifo8_is_empty(fifo)) {
114         return 0;
115     }
116 
117     return fifo8_pop(fifo);
118 }
119 
120 static uint32_t esp_get_tc(ESPState *s)
121 {
122     uint32_t dmalen;
123 
124     dmalen = s->rregs[ESP_TCLO];
125     dmalen |= s->rregs[ESP_TCMID] << 8;
126     dmalen |= s->rregs[ESP_TCHI] << 16;
127 
128     return dmalen;
129 }
130 
131 static void esp_set_tc(ESPState *s, uint32_t dmalen)
132 {
133     s->rregs[ESP_TCLO] = dmalen;
134     s->rregs[ESP_TCMID] = dmalen >> 8;
135     s->rregs[ESP_TCHI] = dmalen >> 16;
136 }
137 
138 static uint32_t esp_get_stc(ESPState *s)
139 {
140     uint32_t dmalen;
141 
142     dmalen = s->wregs[ESP_TCLO];
143     dmalen |= s->wregs[ESP_TCMID] << 8;
144     dmalen |= s->wregs[ESP_TCHI] << 16;
145 
146     return dmalen;
147 }
148 
149 static uint8_t esp_pdma_read(ESPState *s)
150 {
151     uint8_t val;
152 
153     if (s->do_cmd) {
154         val = esp_fifo_pop(&s->cmdfifo);
155     } else {
156         val = esp_fifo_pop(&s->fifo);
157     }
158 
159     return val;
160 }
161 
162 static void esp_pdma_write(ESPState *s, uint8_t val)
163 {
164     uint32_t dmalen = esp_get_tc(s);
165 
166     if (dmalen == 0) {
167         return;
168     }
169 
170     if (s->do_cmd) {
171         esp_fifo_push(&s->cmdfifo, val);
172     } else {
173         esp_fifo_push(&s->fifo, val);
174     }
175 
176     dmalen--;
177     esp_set_tc(s, dmalen);
178 }
179 
/*
 * Begin selection of the target addressed by the DEST BUS ID register.
 *
 * Returns 0 on success with INTR_FC latched (the IRQ itself is deferred,
 * see the comment below), or -1 with a disconnect interrupt raised when
 * no device exists at that target ID.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    /* Any in-progress transfer state is abandoned on (re)selection */
    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_busid_cmd() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
214 
215 static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
216 {
217     uint8_t buf[ESP_CMDFIFO_SZ];
218     uint32_t dmalen, n;
219     int target;
220 
221     target = s->wregs[ESP_WBUSID] & BUSID_DID;
222     if (s->dma) {
223         dmalen = MIN(esp_get_tc(s), maxlen);
224         if (dmalen == 0) {
225             return 0;
226         }
227         if (s->dma_memory_read) {
228             s->dma_memory_read(s->dma_opaque, buf, dmalen);
229             fifo8_push_all(&s->cmdfifo, buf, dmalen);
230         } else {
231             if (esp_select(s) < 0) {
232                 fifo8_reset(&s->cmdfifo);
233                 return -1;
234             }
235             esp_raise_drq(s);
236             fifo8_reset(&s->cmdfifo);
237             return 0;
238         }
239     } else {
240         dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
241         if (dmalen == 0) {
242             return 0;
243         }
244         memcpy(buf, fifo8_pop_buf(&s->fifo, dmalen, &n), dmalen);
245         if (dmalen >= 3) {
246             buf[0] = buf[2] >> 5;
247         }
248         fifo8_push_all(&s->cmdfifo, buf, dmalen);
249     }
250     trace_esp_get_cmd(dmalen, target);
251 
252     if (esp_select(s) < 0) {
253         fifo8_reset(&s->cmdfifo);
254         return -1;
255     }
256     return dmalen;
257 }
258 
/*
 * Dispatch the CDB accumulated in cmdfifo to the LUN encoded in the low
 * 3 bits of the IDENTIFY message byte (busid) and set up the data phase
 * according to the transfer length reported by the SCSI layer.
 */
static void do_busid_cmd(ESPState *s, uint8_t busid)
{
    uint32_t n, cmdlen;
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;
    uint8_t *buf;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    cmdlen = fifo8_num_used(&s->cmdfifo);
    buf = (uint8_t *)fifo8_pop_buf(&s->cmdfifo, cmdlen, &n);

    /*
     * esp_select() already verified a device exists at this target;
     * presumably scsi_device_find() falls back to a device on the target
     * when the exact LUN is absent, so current_lun is non-NULL - verify
     * against the SCSI bus implementation.
     */
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    /* datalen > 0: DATA IN, datalen < 0: DATA OUT, 0: no data phase */
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        /* Kick off the async transfer; esp_transfer_data() continues it */
        scsi_req_continue(s->current_req);
        return;
    }
    /*
     * datalen == 0: no data phase; the request completes through
     * esp_command_complete(), which raises the interrupt.
     */
}
299 
/*
 * Consume the IDENTIFY message byte plus any further message bytes that
 * precede the CDB in cmdfifo, then hand the CDB to do_busid_cmd().
 *
 * NOTE(review): assumes cmdfifo is non-empty and cmdfifo_cdb_offset >= 1;
 * all current callers check this before calling - verify when adding new
 * call sites.
 */
static void do_cmd(ESPState *s)
{
    uint8_t busid = fifo8_pop(&s->cmdfifo);
    uint32_t n;

    s->cmdfifo_cdb_offset--;

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        fifo8_pop_buf(&s->cmdfifo, s->cmdfifo_cdb_offset, &n);
        s->cmdfifo_cdb_offset = 0;
    }

    do_busid_cmd(s, busid);
}
315 
316 static void satn_pdma_cb(ESPState *s)
317 {
318     s->do_cmd = 0;
319     if (!fifo8_is_empty(&s->cmdfifo)) {
320         s->cmdfifo_cdb_offset = 1;
321         do_cmd(s);
322     }
323 }
324 
/*
 * SELECT WITH ATN command: gather the IDENTIFY byte plus CDB and
 * dispatch them.  Deferred via dma_cb if the DMA engine is disabled.
 */
static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        /* Retry the whole command once esp_dma_enable() fires */
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* One IDENTIFY message byte precedes the CDB */
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
    /* cmdlen < 0: selection failed; get_cmd() already raised INTR_DC */
}
345 
346 static void s_without_satn_pdma_cb(ESPState *s)
347 {
348     uint32_t len;
349 
350     s->do_cmd = 0;
351     len = fifo8_num_used(&s->cmdfifo);
352     if (len) {
353         s->cmdfifo_cdb_offset = 0;
354         do_busid_cmd(s, 0);
355     }
356 }
357 
/*
 * SELECT WITHOUT ATN: the CDB arrives with no preceding message byte,
 * so dispatch straight to LUN 0 of the selected target.
 */
static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        /* Retry the whole command once esp_dma_enable() fires */
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* No message bytes precede the CDB */
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
    /* cmdlen < 0: selection failed; get_cmd() already raised INTR_DC */
}
378 
/*
 * PDMA completion for SELECT WITH ATN AND STOP: the IDENTIFY byte has
 * arrived, so stop after the message out phase and interrupt the host;
 * the CDB itself is delivered separately by the host later.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        /* Stay in command-accumulation mode: the CDB is still to come */
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
392 
/*
 * SELECT WITH ATN AND STOP: transfer only the IDENTIFY message byte
 * (get_cmd() maxlen of 1) and then stop, leaving the host to send the
 * CDB in a separate step.
 */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        /* Retry the whole command once esp_dma_enable() fires */
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        /* IDENTIFY received: stop in message out phase, CDB follows later */
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
    /* cmdlen < 0: selection failed; get_cmd() already raised INTR_DC */
}
418 
/* PDMA completion for write_response(): status + message byte delivered */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
426 
/*
 * ICCS command: deliver the status byte plus a trailing zero message
 * byte (COMMAND COMPLETE) to the host via DMA, PDMA or the FIFO.
 */
static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* No DMA backend: deliver via PDMA and finish in the callback */
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        /* Non-DMA: the host reads the two bytes out of the FIFO itself */
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
454 
/* Signal DMA completion: latch terminal count, clear counters, raise IRQ */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
464 
/*
 * PDMA completion callback for data-phase transfers: moves bytes between
 * the FIFO and the current SCSI request buffer and decides whether the
 * transfer (or the whole DMA operation) has finished.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* The PDMA bytes formed a command, not data: dispatch it */
        s->ti_size = 0;
        s->do_cmd = 0;
        do_cmd(s);
        esp_lower_drq(s);
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            /* Request buffer full: ask the SCSI layer for the next chunk */
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            /* Transfer counter expired: the DMA operation is complete */
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                 s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately.  */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
542 
/*
 * Main DMA engine: moves bytes between the DMA backend (or, in PDMA
 * mode, the FIFO) and either the command FIFO (while do_cmd is set) or
 * the current SCSI request buffer.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            /* No DMA backend: switch to PDMA and wait for the host */
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    /* Never move more than the SCSI layer made available */
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            /* PDMA: the host pushes the bytes into the FIFO itself */
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
    esp_lower_drq(s);
}
667 
668 static void esp_do_nodma(ESPState *s)
669 {
670     int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
671     uint32_t cmdlen, n;
672     int len;
673 
674     if (s->do_cmd) {
675         cmdlen = fifo8_num_used(&s->cmdfifo);
676         trace_esp_handle_ti_cmd(cmdlen);
677         s->ti_size = 0;
678         if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
679             /* No command received */
680             if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
681                 return;
682             }
683 
684             /* Command has been received */
685             s->do_cmd = 0;
686             do_cmd(s);
687         } else {
688             /*
689              * Extra message out bytes received: update cmdfifo_cdb_offset
690              * and then switch to commmand phase
691              */
692             s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
693             s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
694             s->rregs[ESP_RSEQ] = SEQ_CD;
695             s->rregs[ESP_RINTR] |= INTR_BS;
696             esp_raise_irq(s);
697         }
698         return;
699     }
700 
701     if (!s->current_req) {
702         return;
703     }
704 
705     if (s->async_len == 0) {
706         /* Defer until data is available.  */
707         return;
708     }
709 
710     if (to_device) {
711         len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
712         memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
713         s->async_buf += len;
714         s->async_len -= len;
715         s->ti_size += len;
716     } else {
717         len = MIN(s->ti_size, s->async_len);
718         len = MIN(len, fifo8_num_free(&s->fifo));
719         fifo8_push_all(&s->fifo, s->async_buf, len);
720         s->async_buf += len;
721         s->async_len -= len;
722         s->ti_size -= len;
723     }
724 
725     if (s->async_len == 0) {
726         scsi_req_continue(s->current_req);
727 
728         if (to_device || s->ti_size == 0) {
729             return;
730         }
731     }
732 
733     s->rregs[ESP_RINTR] |= INTR_BS;
734     esp_raise_irq(s);
735 }
736 
/*
 * SCSI-layer callback invoked when the current request finishes.
 * Records the completion status, switches to the STATUS phase and
 * releases our reference to the request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        /* Completion arrived with transfer bytes still outstanding */
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;
    /* Enter STATUS phase; esp_dma_done() raises the completion IRQ */
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    esp_lower_drq(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
760 
/*
 * SCSI-layer callback invoked when request data (DATA IN) or buffer
 * space (DATA OUT) becomes available.  Handles the deferred command
 * completion interrupt for DATA IN and resumes any pending transfer.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);

        /*
         * If data is ready to transfer and the TI command has already
         * been executed, start DMA immediately. Otherwise DMA will start
         * when host sends the TI command
         */
        if (s->ti_size && (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA))) {
            esp_do_dma(s);
        }
        return;
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd & CMD_DMA) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else {
        esp_do_nodma(s);
    }
}
819 
820 static void handle_ti(ESPState *s)
821 {
822     uint32_t dmalen;
823 
824     if (s->dma && !s->dma_enabled) {
825         s->dma_cb = handle_ti;
826         return;
827     }
828 
829     s->ti_cmd = s->rregs[ESP_CMD];
830     if (s->dma) {
831         dmalen = esp_get_tc(s);
832         trace_esp_handle_ti(dmalen);
833         s->rregs[ESP_RSTAT] &= ~STAT_TC;
834         esp_do_dma(s);
835     } else {
836         trace_esp_handle_ti(s->ti_size);
837         esp_do_nodma(s);
838     }
839 }
840 
841 void esp_hard_reset(ESPState *s)
842 {
843     memset(s->rregs, 0, ESP_REGS);
844     memset(s->wregs, 0, ESP_REGS);
845     s->tchi_written = 0;
846     s->ti_size = 0;
847     fifo8_reset(&s->fifo);
848     fifo8_reset(&s->cmdfifo);
849     s->dma = 0;
850     s->do_cmd = 0;
851     s->dma_cb = NULL;
852 
853     s->rregs[ESP_CFG1] = 7;
854 }
855 
/* Soft reset: drop both output lines, then reset all chip state */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
862 
863 static void parent_esp_reset(ESPState *s, int irq, int level)
864 {
865     if (level) {
866         esp_soft_reset(s);
867     }
868 }
869 
/*
 * Handle a guest read of one of the byte-wide ESP registers.  Reading
 * RINTR has the side effect of clearing the interrupt state; reading
 * FIFO pops one byte.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
                (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
     case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
917 
/*
 * Handle a guest write to one of the byte-wide ESP registers.  Writes
 * to ESP_CMD are decoded and dispatched; most other registers are just
 * latched into wregs[] at the end.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing the transfer count clears the terminal-count status */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);
        } else {
            esp_fifo_push(&s->fifo, val);
        }

        /* Non-DMA transfers raise an interrupt after every byte */
        if (s->rregs[ESP_CMD] == CMD_TI) {
            s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS;
            esp_raise_irq(s);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            if (esp_get_stc(s) == 0) {
                /* A start count of zero selects the maximum (0x10000) */
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        /* Decode and dispatch the command in the low bits */
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            /* Only interrupt if "Reset Interrupt Disable" is clear */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* These config registers read back what was written */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    /* Latch the written value for later readout via wregs[] */
    s->wregs[saddr] = val;
}
1044 
1045 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1046                             unsigned size, bool is_write,
1047                             MemTxAttrs attrs)
1048 {
1049     return (size == 1) || (is_write && size == 4);
1050 }
1051 
1052 static bool esp_is_before_version_5(void *opaque, int version_id)
1053 {
1054     ESPState *s = ESP(opaque);
1055 
1056     version_id = MIN(version_id, s->mig_version_id);
1057     return version_id < 5;
1058 }
1059 
1060 static bool esp_is_version_5(void *opaque, int version_id)
1061 {
1062     ESPState *s = ESP(opaque);
1063 
1064     version_id = MIN(version_id, s->mig_version_id);
1065     return version_id == 5;
1066 }
1067 
1068 int esp_pre_save(void *opaque)
1069 {
1070     ESPState *s = ESP(object_resolve_path_component(
1071                       OBJECT(opaque), "esp"));
1072 
1073     s->mig_version_id = vmstate_esp.version_id;
1074     return 0;
1075 }
1076 
/*
 * post_load hook: convert state arriving from an older (< 5) stream
 * format — raw transfer counter plus flat ti/cmd buffers — into the
 * current representation (TC register and Fifo8 FIFOs).
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    /* Honour the (possibly lower) version recorded by the wrapper device */
    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        /* Old streams carried the DMA transfer counter as a plain field */
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        /*
         * NOTE(review): bytes are pushed starting at index 0 rather than
         * at mig_ti_rptr; this is only equivalent when mig_ti_rptr == 0
         * in the inbound stream — confirm against the v4 save format.
         */
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    /* From here on the device state is in the current (v5) layout */
    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1102 
/*
 * Core ESP migration description (current version 5).  Streams older
 * than v5 carried the mig_* scratch fields, which esp_post_load()
 * converts into the Fifo8-based layout; the _TEST wrappers select
 * which fields are serialized based on the negotiated version.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        /* Legacy (< v5) transfer buffer, superseded by "fifo" below */
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        /* Legacy deferred-completion state, dropped in v5 */
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /*
         * Legacy command buffer in two slices: the first 16 bytes since
         * stream version 0, the remainder only from version 4 on (per
         * the _v argument of VMSTATE_STATIC_BUFFER).
         */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        /* Legacy DMA counter, folded into the TC register by post_load */
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        /* Current (v5) representation */
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_END_OF_LIST()
    },
};
1137 
1138 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1139                                  uint64_t val, unsigned int size)
1140 {
1141     SysBusESPState *sysbus = opaque;
1142     ESPState *s = ESP(&sysbus->esp);
1143     uint32_t saddr;
1144 
1145     saddr = addr >> sysbus->it_shift;
1146     esp_reg_write(s, saddr, val);
1147 }
1148 
1149 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1150                                     unsigned int size)
1151 {
1152     SysBusESPState *sysbus = opaque;
1153     ESPState *s = ESP(&sysbus->esp);
1154     uint32_t saddr;
1155 
1156     saddr = addr >> sysbus->it_shift;
1157     return esp_reg_read(s, saddr);
1158 }
1159 
/*
 * Register-bank MMIO ops; esp_mem_accepts() restricts access sizes to
 * single bytes, plus 32-bit writes.
 */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1166 
1167 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1168                                   uint64_t val, unsigned int size)
1169 {
1170     SysBusESPState *sysbus = opaque;
1171     ESPState *s = ESP(&sysbus->esp);
1172     uint32_t dmalen;
1173 
1174     trace_esp_pdma_write(size);
1175 
1176     switch (size) {
1177     case 1:
1178         esp_pdma_write(s, val);
1179         break;
1180     case 2:
1181         esp_pdma_write(s, val >> 8);
1182         esp_pdma_write(s, val);
1183         break;
1184     }
1185     dmalen = esp_get_tc(s);
1186     if (dmalen == 0 || fifo8_num_free(&s->fifo) < 2) {
1187         s->pdma_cb(s);
1188     }
1189 }
1190 
1191 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1192                                      unsigned int size)
1193 {
1194     SysBusESPState *sysbus = opaque;
1195     ESPState *s = ESP(&sysbus->esp);
1196     uint64_t val = 0;
1197 
1198     trace_esp_pdma_read(size);
1199 
1200     switch (size) {
1201     case 1:
1202         val = esp_pdma_read(s);
1203         break;
1204     case 2:
1205         val = esp_pdma_read(s);
1206         val = (val << 8) | esp_pdma_read(s);
1207         break;
1208     }
1209     if (fifo8_num_used(&s->fifo) < 2) {
1210         s->pdma_cb(s);
1211     }
1212     return val;
1213 }
1214 
/*
 * Pseudo-DMA MMIO ops: guest accesses of 1-4 bytes are valid, but the
 * implementation handles at most 2 bytes at a time, so the memory core
 * splits wider accesses accordingly.
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1224 
/* Callbacks wired into the generic SCSI layer (no tagged queuing) */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1234 
1235 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1236 {
1237     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1238     ESPState *s = ESP(&sysbus->esp);
1239 
1240     switch (irq) {
1241     case 0:
1242         parent_esp_reset(s, irq, level);
1243         break;
1244     case 1:
1245         esp_dma_enable(opaque, irq, level);
1246         break;
1247     }
1248 }
1249 
/*
 * Realize the sysbus wrapper: realize the embedded ESP core first,
 * then export its IRQs, MMIO windows, GPIO lines and SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    /* Realize the child device; propagate any error to the caller */
    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have configured the register stride before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* MMIO window 0: register bank, scaled by the board's stride */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    /* MMIO window 1: 4-byte pseudo-DMA port */
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    /* GPIO 0: reset, GPIO 1: DMA enable */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}
1276 
1277 static void sysbus_esp_hard_reset(DeviceState *dev)
1278 {
1279     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1280     ESPState *s = ESP(&sysbus->esp);
1281 
1282     esp_hard_reset(s);
1283 }
1284 
1285 static void sysbus_esp_init(Object *obj)
1286 {
1287     SysBusESPState *sysbus = SYSBUS_ESP(obj);
1288 
1289     object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
1290 }
1291 
/*
 * Migration description for the sysbus wrapper.  mig_version_id is
 * serialized before the nested ESP struct (from stream version 2 on)
 * so the vmstate_esp field tests can consult the sender's version
 * while the nested fields are loaded.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1303 
1304 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
1305 {
1306     DeviceClass *dc = DEVICE_CLASS(klass);
1307 
1308     dc->realize = sysbus_esp_realize;
1309     dc->reset = sysbus_esp_hard_reset;
1310     dc->vmsd = &vmstate_sysbus_esp_scsi;
1311     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1312 }
1313 
/* QOM type registration data for the sysbus-attached ESP wrapper */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1321 
1322 static void esp_finalize(Object *obj)
1323 {
1324     ESPState *s = ESP(obj);
1325 
1326     fifo8_destroy(&s->fifo);
1327     fifo8_destroy(&s->cmdfifo);
1328 }
1329 
1330 static void esp_init(Object *obj)
1331 {
1332     ESPState *s = ESP(obj);
1333 
1334     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1335     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1336 }
1337 
1338 static void esp_class_init(ObjectClass *klass, void *data)
1339 {
1340     DeviceClass *dc = DEVICE_CLASS(klass);
1341 
1342     /* internal device for sysbusesp/pciespscsi, not user-creatable */
1343     dc->user_creatable = false;
1344     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1345 }
1346 
/* QOM type registration data for the core ESP device */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
1355 
/* Register both QOM types with the type system at module init time */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)
1363