xref: /qemu/hw/scsi/esp.c (revision 042879fc3fc02b67d907b462020c975f6fb1f5ae)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On Macintosh Quadra it is a NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
63 static void esp_raise_drq(ESPState *s)
64 {
65     qemu_irq_raise(s->irq_data);
66     trace_esp_raise_drq();
67 }
68 
69 static void esp_lower_drq(ESPState *s)
70 {
71     qemu_irq_lower(s->irq_data);
72     trace_esp_lower_drq();
73 }
74 
75 void esp_dma_enable(ESPState *s, int irq, int level)
76 {
77     if (level) {
78         s->dma_enabled = 1;
79         trace_esp_dma_enable();
80         if (s->dma_cb) {
81             s->dma_cb(s);
82             s->dma_cb = NULL;
83         }
84     } else {
85         trace_esp_dma_disable();
86         s->dma_enabled = 0;
87     }
88 }
89 
90 void esp_request_cancelled(SCSIRequest *req)
91 {
92     ESPState *s = req->hba_private;
93 
94     if (req == s->current_req) {
95         scsi_req_unref(s->current_req);
96         s->current_req = NULL;
97         s->current_dev = NULL;
98     }
99 }
100 
101 static void esp_fifo_push(ESPState *s, uint8_t val)
102 {
103     if (fifo8_num_used(&s->fifo) == ESP_FIFO_SZ) {
104         trace_esp_error_fifo_overrun();
105         return;
106     }
107 
108     fifo8_push(&s->fifo, val);
109 }
110 
111 static uint8_t esp_fifo_pop(ESPState *s)
112 {
113     if (fifo8_is_empty(&s->fifo)) {
114         return 0;
115     }
116 
117     return fifo8_pop(&s->fifo);
118 }
119 
120 static uint32_t esp_get_tc(ESPState *s)
121 {
122     uint32_t dmalen;
123 
124     dmalen = s->rregs[ESP_TCLO];
125     dmalen |= s->rregs[ESP_TCMID] << 8;
126     dmalen |= s->rregs[ESP_TCHI] << 16;
127 
128     return dmalen;
129 }
130 
131 static void esp_set_tc(ESPState *s, uint32_t dmalen)
132 {
133     s->rregs[ESP_TCLO] = dmalen;
134     s->rregs[ESP_TCMID] = dmalen >> 8;
135     s->rregs[ESP_TCHI] = dmalen >> 16;
136 }
137 
138 static uint32_t esp_get_stc(ESPState *s)
139 {
140     uint32_t dmalen;
141 
142     dmalen = s->wregs[ESP_TCLO];
143     dmalen |= s->wregs[ESP_TCMID] << 8;
144     dmalen |= s->wregs[ESP_TCHI] << 16;
145 
146     return dmalen;
147 }
148 
149 static uint8_t esp_pdma_read(ESPState *s)
150 {
151     uint8_t val;
152 
153     if (s->do_cmd) {
154         val = s->cmdbuf[s->cmdlen++];
155     } else {
156         val = esp_fifo_pop(s);
157     }
158 
159     return val;
160 }
161 
162 static void esp_pdma_write(ESPState *s, uint8_t val)
163 {
164     uint32_t dmalen = esp_get_tc(s);
165 
166     if (dmalen == 0) {
167         return;
168     }
169 
170     if (s->do_cmd) {
171         s->cmdbuf[s->cmdlen++] = val;
172     } else {
173         esp_fifo_push(s, val);
174     }
175 
176     dmalen--;
177     esp_set_tc(s, dmalen);
178 }
179 
/*
 * Begin (re)selection of the target encoded in the bus ID register.
 *
 * Resets the transfer size and FIFO, cancels any request still in
 * flight, then looks up the target on the bus.  Returns 0 on success;
 * if no device answers, raises a disconnect interrupt and returns -1.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_busid_cmd() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
214 
/*
 * Fetch up to maxlen selection/command bytes into s->cmdbuf, from
 * guest memory (real DMA), the PDMA region, or the FIFO, and start
 * target selection.
 *
 * Returns the number of bytes fetched; 0 when nothing was available
 * or the bytes will arrive asynchronously via PDMA; or -1 when the
 * selection failed.  NOTE(review): -1 is returned through a uint32_t;
 * callers store it into an int32_t before comparing — confirm no
 * caller compares the raw unsigned value.
 */
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t *buf = s->cmdbuf;
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        /* Transfer length is bounded by the programmed counter */
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            /* No DMA backend: select now, bytes arrive later via PDMA */
            if (esp_select(s) < 0) {
                return -1;
            }
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = MIN(s->ti_size, maxlen);
        if (dmalen == 0) {
            return 0;
        }
        memcpy(buf, fifo8_pop_buf(&s->fifo, dmalen, &n), dmalen);
        if (dmalen >= 3) {
            /*
             * NOTE(review): overwrites the first buffered byte with
             * bits 7..5 of byte 2 — the intent is not evident from
             * this file; verify against the message/CDB layout the
             * callers expect.
             */
            buf[0] = buf[2] >> 5;
        }
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        return -1;
    }
    return dmalen;
}
253 
/*
 * Enqueue the CDB in buf for the LUN carried in the low bits of the
 * IDENTIFY message byte (busid) and switch to the matching data phase.
 * For DATA IN the completion interrupt is deferred until the first
 * chunk of data has been transferred (see esp_transfer_data()); for
 * DATA OUT it is raised immediately.
 */
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    /* datalen > 0: device will send data; < 0: device expects data */
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
287 
288 static void do_cmd(ESPState *s)
289 {
290     uint8_t *buf = s->cmdbuf;
291     uint8_t busid = buf[0];
292 
293     /* Ignore extended messages for now */
294     do_busid_cmd(s, &buf[s->cmdbuf_cdb_offset], busid);
295 }
296 
297 static void satn_pdma_cb(ESPState *s)
298 {
299     s->do_cmd = 0;
300     if (s->cmdlen) {
301         s->cmdbuf_cdb_offset = 1;
302         do_cmd(s);
303     }
304 }
305 
306 static void handle_satn(ESPState *s)
307 {
308     int32_t cmdlen;
309 
310     if (s->dma && !s->dma_enabled) {
311         s->dma_cb = handle_satn;
312         return;
313     }
314     s->pdma_cb = satn_pdma_cb;
315     cmdlen = get_cmd(s, ESP_CMDBUF_SZ);
316     if (cmdlen > 0) {
317         s->cmdlen = cmdlen;
318         s->cmdbuf_cdb_offset = 1;
319         do_cmd(s);
320     } else if (cmdlen == 0) {
321         s->cmdlen = 0;
322         s->do_cmd = 1;
323         /* Target present, but no cmd yet - switch to command phase */
324         s->rregs[ESP_RSEQ] = SEQ_CD;
325         s->rregs[ESP_RSTAT] = STAT_CD;
326     }
327 }
328 
329 static void s_without_satn_pdma_cb(ESPState *s)
330 {
331     s->do_cmd = 0;
332     if (s->cmdlen) {
333         s->cmdbuf_cdb_offset = 0;
334         do_busid_cmd(s, s->cmdbuf, 0);
335     }
336 }
337 
338 static void handle_s_without_atn(ESPState *s)
339 {
340     int32_t cmdlen;
341 
342     if (s->dma && !s->dma_enabled) {
343         s->dma_cb = handle_s_without_atn;
344         return;
345     }
346     s->pdma_cb = s_without_satn_pdma_cb;
347     cmdlen = get_cmd(s, ESP_CMDBUF_SZ);
348     if (cmdlen > 0) {
349         s->cmdlen = cmdlen;
350         s->cmdbuf_cdb_offset = 0;
351         do_busid_cmd(s, s->cmdbuf, 0);
352     } else if (cmdlen == 0) {
353         s->cmdlen = 0;
354         s->do_cmd = 1;
355         /* Target present, but no cmd yet - switch to command phase */
356         s->rregs[ESP_RSEQ] = SEQ_CD;
357         s->rregs[ESP_RSTAT] = STAT_CD;
358     }
359 }
360 
361 static void satn_stop_pdma_cb(ESPState *s)
362 {
363     s->do_cmd = 0;
364     if (s->cmdlen) {
365         trace_esp_handle_satn_stop(s->cmdlen);
366         s->do_cmd = 1;
367         s->cmdbuf_cdb_offset = 1;
368         s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
369         s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
370         s->rregs[ESP_RSEQ] = SEQ_CD;
371         esp_raise_irq(s);
372     }
373 }
374 
/*
 * CMD_SELATNS: select with ATN and stop.  Only the single (IDENTIFY)
 * message byte is fetched here; the chip then halts so the host can
 * send the CDB with a separate TI command (do_cmd stays set).
 */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        /* DMA engine gated off: retry from esp_dma_enable() */
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(cmdlen);
        s->cmdlen = cmdlen;
        s->do_cmd = 1;
        s->cmdbuf_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->cmdlen = 0;
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}
402 
403 static void write_response_pdma_cb(ESPState *s)
404 {
405     s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
406     s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
407     s->rregs[ESP_RSEQ] = SEQ_CD;
408     esp_raise_irq(s);
409 }
410 
/*
 * CMD_ICCS: deliver the status byte plus a zero message byte to the
 * initiator — through real DMA or PDMA when in DMA mode, otherwise by
 * leaving both bytes in the FIFO for PIO reads.
 */
static void write_response(ESPState *s)
{
    uint32_t n;

    trace_esp_write_response(s->status);

    fifo8_reset(&s->fifo);
    esp_fifo_push(s, s->status);
    esp_fifo_push(s, 0);

    if (s->dma) {
        if (s->dma_memory_write) {
            /* Real DMA backend: push both bytes straight to memory */
            s->dma_memory_write(s->dma_opaque,
                                (uint8_t *)fifo8_pop_buf(&s->fifo, 2, &n), 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* PDMA: the host reads the bytes; finish in the callback */
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        /* PIO: two bytes now readable from the FIFO register */
        s->ti_size = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
439 
440 static void esp_dma_done(ESPState *s)
441 {
442     s->rregs[ESP_RSTAT] |= STAT_TC;
443     s->rregs[ESP_RINTR] |= INTR_BS;
444     s->rregs[ESP_RSEQ] = 0;
445     s->rregs[ESP_RFLAGS] = 0;
446     esp_set_tc(s, 0);
447     esp_raise_irq(s);
448 }
449 
/*
 * PDMA completion callback for TI transfers: move data between the
 * FIFO and the SCSI layer's async buffer once the host has serviced
 * the PDMA region.  Command bytes are dispatched instead when do_cmd
 * is set.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* The transferred bytes were the CDB, not data: dispatch it */
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s);
        esp_lower_drq(s);
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
        if (s->async_len == 0) {
            /* SCSI buffer consumed: ask the SCSI layer for more */
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            /* Transfer counter exhausted: the DMA is complete */
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            if (s->current_req) {
                /* Defer until the scsi layer has completed */
                scsi_req_continue(s->current_req);
                s->data_in_ready = false;
            }
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately.  */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
509 
/*
 * Execute one step of a DMA transfer: command bytes into cmdbuf while
 * do_cmd is set, otherwise data between guest memory (or the FIFO via
 * PDMA) and the SCSI layer's async buffer, bounded by the transfer
 * counter and the available async buffer.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != 0 (see the assert())
         */
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        } else {
            /* No DMA backend: wait for the host to drive the PDMA region */
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdbuf_cdb_offset == s->cmdlen) {
                return;
            }

            /* Command has been received */
            s->cmdlen = 0;
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdbuf_cdb_offset
             * and then switch to command phase
             */
            s->cmdbuf_cdb_offset = s->cmdlen;
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            /* No DMA backend: host supplies the data through PDMA */
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    /* Book-keeping after a real-DMA transfer of len bytes */
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
    esp_lower_drq(s);
}
614 
/*
 * SCSIBusInfo.complete callback: the request finished.  Latch the
 * status byte, switch to status phase with a completion interrupt,
 * and release our reference to the request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        /* Completion with residual transfer size: unexpected */
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    esp_lower_drq(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
638 
/*
 * SCSIBusInfo.transfer_data callback: the SCSI layer has a buffer of
 * len bytes ready (DATA IN) or wants len bytes (DATA OUT).  Raises
 * the deferred command-completion interrupt on the first DATA IN
 * chunk and continues any in-progress DMA.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);

        /*
         * If data is ready to transfer and the TI command has already
         * been executed, start DMA immediately. Otherwise DMA will start
         * when host sends the TI command
         */
        if (s->ti_size && (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA))) {
            esp_do_dma(s);
        }
        return;
    }

    if (dmalen) {
        esp_do_dma(s);
    } else if (s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
        esp_lower_drq(s);
    }
}
682 
/*
 * CMD_TI (transfer information): start or continue an information
 * transfer.  In DMA mode this kicks esp_do_dma(); in PIO mode with
 * buffered command bytes it either dispatches the CDB or records the
 * extra message-out bytes and re-enters command phase.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        /* DMA engine gated off: retry from esp_dma_enable() */
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = esp_get_tc(s);
    if (s->dma) {
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdbuf_cdb_offset == s->cmdlen) {
                return;
            }

            /* Command has been received */
            s->cmdlen = 0;
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdbuf_cdb_offset
             * and then switch to command phase
             */
            s->cmdbuf_cdb_offset = s->cmdlen;
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
    }
}
723 
724 void esp_hard_reset(ESPState *s)
725 {
726     memset(s->rregs, 0, ESP_REGS);
727     memset(s->wregs, 0, ESP_REGS);
728     s->tchi_written = 0;
729     s->ti_size = 0;
730     fifo8_reset(&s->fifo);
731     s->dma = 0;
732     s->do_cmd = 0;
733     s->dma_cb = NULL;
734 
735     s->rregs[ESP_CFG1] = 7;
736 }
737 
738 static void esp_soft_reset(ESPState *s)
739 {
740     qemu_irq_lower(s->irq);
741     qemu_irq_lower(s->irq_data);
742     esp_hard_reset(s);
743 }
744 
745 static void parent_esp_reset(ESPState *s, int irq, int level)
746 {
747     if (level) {
748         esp_soft_reset(s);
749     }
750 }
751 
/*
 * Read an ESP register (saddr is a register index, not a byte
 * address).  Two registers have read side effects: FIFO pops a byte
 * and RINTR clears the interrupt state.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            /*
             * NOTE(review): ti_size is decremented even when the FIFO
             * is empty (esp_fifo_pop() then yields 0) — confirm this
             * cannot drive ti_size negative unexpectedly.
             */
            s->ti_size--;
            s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
795 
/*
 * Write an ESP register (saddr is a register index).  A write to
 * ESP_CMD latches the DMA flag, reloads the transfer counter and
 * dispatches the chip command; other registers mostly just shadow
 * the value into wregs.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        /* Remember that TCHI was written so reads stop returning chip_id */
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing the counter clears the terminal-count status bit */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Byte belongs to the command being accumulated */
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else {
            s->ti_size++;
            esp_fifo_push(s, val);
        }

        /* Non-DMA transfers raise an interrupt after every byte */
        if (s->rregs[ESP_CMD] == CMD_TI) {
            s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS;
            esp_raise_irq(s);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            if (esp_get_stc(s) == 0) {
                /* A programmed count of 0 means the maximum, 0x10000 */
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        /* Dispatch the chip command encoded in the low bits */
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            /* Only interrupt if "SCSI reset reporting" is not disabled */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Configuration registers are readable back via rregs */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    /* Shadow the written value (skipped for invalid registers above) */
    s->wregs[saddr] = val;
}
927 
928 static bool esp_mem_accepts(void *opaque, hwaddr addr,
929                             unsigned size, bool is_write,
930                             MemTxAttrs attrs)
931 {
932     return (size == 1) || (is_write && size == 4);
933 }
934 
935 static bool esp_is_before_version_5(void *opaque, int version_id)
936 {
937     ESPState *s = ESP(opaque);
938 
939     version_id = MIN(version_id, s->mig_version_id);
940     return version_id < 5;
941 }
942 
943 static bool esp_is_version_5(void *opaque, int version_id)
944 {
945     ESPState *s = ESP(opaque);
946 
947     version_id = MIN(version_id, s->mig_version_id);
948     return version_id == 5;
949 }
950 
951 static int esp_pre_save(void *opaque)
952 {
953     ESPState *s = ESP(opaque);
954 
955     s->mig_version_id = vmstate_esp.version_id;
956     return 0;
957 }
958 
/*
 * After loading, convert pre-v5 stream state to the current layout:
 * the TC registers are rebuilt from the legacy mig_dma_left counter
 * and the legacy ti_buf ring is replayed into the Fifo8.
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    /* The source may have sent an older stream than our own version */
    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
979 
/*
 * Migration state for the ESP core.  Version 5 replaced the legacy
 * ti_buf ring and dma_left counter with the Fifo8 and the TC
 * registers; the mig_* fields exist only to read/write the older
 * stream formats (see esp_pre_save()/esp_post_load()).
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .pre_save = esp_pre_save,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        /* Legacy (pre-v5) transfer buffer fields */
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        /* New (v5) fields */
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdbuf_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_END_OF_LIST()
    },
};
1010 
1011 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1012                                  uint64_t val, unsigned int size)
1013 {
1014     SysBusESPState *sysbus = opaque;
1015     ESPState *s = ESP(&sysbus->esp);
1016     uint32_t saddr;
1017 
1018     saddr = addr >> sysbus->it_shift;
1019     esp_reg_write(s, saddr, val);
1020 }
1021 
1022 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1023                                     unsigned int size)
1024 {
1025     SysBusESPState *sysbus = opaque;
1026     ESPState *s = ESP(&sysbus->esp);
1027     uint32_t saddr;
1028 
1029     saddr = addr >> sysbus->it_shift;
1030     return esp_reg_read(s, saddr);
1031 }
1032 
/* Register window ops; access sizes are gated by esp_mem_accepts() */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1039 
1040 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1041                                   uint64_t val, unsigned int size)
1042 {
1043     SysBusESPState *sysbus = opaque;
1044     ESPState *s = ESP(&sysbus->esp);
1045     uint32_t dmalen;
1046 
1047     trace_esp_pdma_write(size);
1048 
1049     switch (size) {
1050     case 1:
1051         esp_pdma_write(s, val);
1052         break;
1053     case 2:
1054         esp_pdma_write(s, val >> 8);
1055         esp_pdma_write(s, val);
1056         break;
1057     }
1058     dmalen = esp_get_tc(s);
1059     if (dmalen == 0 || fifo8_is_full(&s->fifo)) {
1060         s->pdma_cb(s);
1061     }
1062 }
1063 
1064 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1065                                      unsigned int size)
1066 {
1067     SysBusESPState *sysbus = opaque;
1068     ESPState *s = ESP(&sysbus->esp);
1069     uint64_t val = 0;
1070 
1071     trace_esp_pdma_read(size);
1072 
1073     switch (size) {
1074     case 1:
1075         val = esp_pdma_read(s);
1076         break;
1077     case 2:
1078         val = esp_pdma_read(s);
1079         val = (val << 8) | esp_pdma_read(s);
1080         break;
1081     }
1082     if (fifo8_is_empty(&s->fifo)) {
1083         s->pdma_cb(s);
1084     }
1085     return val;
1086 }
1087 
/*
 * PDMA data-port MMIO ops: guests may issue 1-4 byte accesses, but the
 * implementation handles at most 2 bytes at a time, so the memory core
 * splits 4-byte accesses into two 2-byte ones.
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1097 
/*
 * SCSI bus callbacks wired into the ESP core: data transfer, command
 * completion and request cancellation are all handled by the shared
 * esp_* helpers.  No tagged command queuing.
 */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1107 
1108 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1109 {
1110     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1111     ESPState *s = ESP(&sysbus->esp);
1112 
1113     switch (irq) {
1114     case 0:
1115         parent_esp_reset(s, irq, level);
1116         break;
1117     case 1:
1118         esp_dma_enable(opaque, irq, level);
1119         break;
1120     }
1121 }
1122 
/*
 * Realize the sysbus wrapper: realize the embedded ESP core device,
 * export its IRQ lines, map the register bank and PDMA data port as
 * MMIO regions, wire up the two GPIO inputs and create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    /* Realize the child ESP core first; bail out on failure. */
    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have configured the register spacing shift. */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* Register bank size scales with the register spacing. */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    /* Two GPIO inputs: 0 = reset, 1 = DMA enable (see gpio_demux). */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}
1149 
1150 static void sysbus_esp_hard_reset(DeviceState *dev)
1151 {
1152     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1153     ESPState *s = ESP(&sysbus->esp);
1154 
1155     esp_hard_reset(s);
1156 }
1157 
1158 static void sysbus_esp_init(Object *obj)
1159 {
1160     SysBusESPState *sysbus = SYSBUS_ESP(obj);
1161 
1162     object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
1163 }
1164 
/*
 * Migration state for the sysbus wrapper: since version 2 the stream
 * carries the embedded ESP's version id ahead of the ESPState struct,
 * which the esp_is_before_version_5/esp_is_version_5 field tests in
 * vmstate_esp use to select the right subset of fields.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1175 
/* Class init for the sysbus ESP: wire realize/reset/migration hooks. */
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1185 
/* QOM type registration data for the sysbus-attached ESP wrapper. */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1193 
1194 static void esp_finalize(Object *obj)
1195 {
1196     ESPState *s = ESP(obj);
1197 
1198     fifo8_destroy(&s->fifo);
1199 }
1200 
1201 static void esp_init(Object *obj)
1202 {
1203     ESPState *s = ESP(obj);
1204 
1205     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1206 }
1207 
/* Class init for the bare ESP core device. */
static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1216 
/* QOM type registration data for the ESP core device. */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
1225 
/* Register both QOM types with the type system at module load. */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)
1233