xref: /qemu/hw/scsi/esp.c (revision 06b40d250ecfa1633209c2e431a7a38acfd03a98)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  * Copyright (c) 2023 Mark Cave-Ayland
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a copy
9  * of this software and associated documentation files (the "Software"), to deal
10  * in the Software without restriction, including without limitation the rights
11  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the Software is
13  * furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice shall be included in
16  * all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24  * THE SOFTWARE.
25  */
26 
27 #include "qemu/osdep.h"
28 #include "hw/sysbus.h"
29 #include "migration/vmstate.h"
30 #include "hw/irq.h"
31 #include "hw/scsi/esp.h"
32 #include "trace.h"
33 #include "qemu/log.h"
34 #include "qemu/module.h"
35 
36 /*
37  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
38  * also produced as NCR89C100. See
39  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
40  * and
41  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
42  *
43  * On Macintosh Quadra it is a NCR53C96.
44  */
45 
/* Assert the ESP interrupt line and latch STAT_INT; no-op if already raised. */
static void esp_raise_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        return;
    }

    s->rregs[ESP_RSTAT] |= STAT_INT;
    qemu_irq_raise(s->irq);
    trace_esp_raise_irq();
}
54 
/* Deassert the ESP interrupt line and clear STAT_INT; no-op if already low. */
static void esp_lower_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        return;
    }

    s->rregs[ESP_RSTAT] &= ~STAT_INT;
    qemu_irq_lower(s->irq);
    trace_esp_lower_irq();
}
63 
/* Assert DRQ (DMA request) and record the new line state; idempotent. */
static void esp_raise_drq(ESPState *s)
{
    if (s->drq_state) {
        return;
    }

    qemu_irq_raise(s->drq_irq);
    trace_esp_raise_drq();
    s->drq_state = true;
}
72 
/* Deassert DRQ (DMA request) and record the new line state; idempotent. */
static void esp_lower_drq(ESPState *s)
{
    if (!s->drq_state) {
        return;
    }

    qemu_irq_lower(s->drq_irq);
    trace_esp_lower_drq();
    s->drq_state = false;
}
81 
/*
 * Human-readable names for the 3-bit SCSI bus phase held in the low bits
 * of ESP_RSTAT, indexed by the STAT_* phase value. Used only for tracing
 * in esp_set_phase() below.
 */
static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};
86 
/* Set the SCSI bus phase in the low 3 bits of RSTAT and trace the change. */
static void esp_set_phase(ESPState *s, uint8_t phase)
{
    uint8_t rstat = s->rregs[ESP_RSTAT] & ~7;

    s->rregs[ESP_RSTAT] = rstat | phase;
    trace_esp_set_phase(esp_phase_names[phase]);
}
94 
esp_get_phase(ESPState * s)95 static uint8_t esp_get_phase(ESPState *s)
96 {
97     return s->rregs[ESP_RSTAT] & 7;
98 }
99 
/*
 * Gate the external DMA engine on (@level != 0) or off. Enabling runs any
 * command handler that was deferred (in s->dma_cb) while DMA was disabled.
 * The @irq argument is unused here.
 */
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (!level) {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
        return;
    }

    s->dma_enabled = 1;
    trace_esp_dma_enable();

    if (s->dma_cb) {
        s->dma_cb(s);
        s->dma_cb = NULL;
    }
}
114 
/* SCSI layer callback: drop our reference to a cancelled in-flight request. */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req != s->current_req) {
        return;
    }

    scsi_req_unref(s->current_req);
    s->current_req = NULL;
    s->current_dev = NULL;
    s->async_len = 0;
}
126 
/*
 * Recompute the DRQ line from the current bus phase, transfer direction
 * and FIFO occupancy. Non-DMA requests never assert DRQ; reserved phases
 * leave DRQ untouched.
 */
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        /* Initiator -> target */
        to_device = true;
        break;

    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        /* Target -> initiator */
        to_device = false;
        break;

    default:
        return;
    }

    if (!s->dma) {
        /* Not a DMA request */
        esp_lower_drq(s);
        return;
    }

    /* DMA request: DRQ follows FIFO space/data according to direction */
    if (to_device ? fifo8_num_free(&s->fifo) < 2
                  : fifo8_num_used(&s->fifo) < 2) {
        esp_lower_drq(s);
    } else {
        esp_raise_drq(s);
    }
}
168 
/*
 * Push one byte into the main FIFO, tracing (and dropping the byte) on
 * overrun, then recompute DRQ. Uses the fifo8_is_full() API helper rather
 * than comparing fifo8_num_used() against the capacity field by hand.
 */
static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_is_full(&s->fifo)) {
        trace_esp_error_fifo_overrun();
    } else {
        fifo8_push(&s->fifo, val);
    }

    esp_update_drq(s);
}
179 
/* Push @len bytes from @data into the main FIFO, then recompute DRQ. */
static void esp_fifo_push_buf(ESPState *s, uint8_t *data, int len)
{
    fifo8_push_all(&s->fifo, data, len);
    esp_update_drq(s);
}
185 
esp_fifo_pop(ESPState * s)186 static uint8_t esp_fifo_pop(ESPState *s)
187 {
188     uint8_t val;
189 
190     if (fifo8_is_empty(&s->fifo)) {
191         val = 0;
192     } else {
193         val = fifo8_pop(&s->fifo);
194     }
195 
196     esp_update_drq(s);
197     return val;
198 }
199 
esp_fifo_pop_buf(ESPState * s,uint8_t * dest,int maxlen)200 static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
201 {
202     uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);
203 
204     esp_update_drq(s);
205     return len;
206 }
207 
esp_get_tc(ESPState * s)208 static uint32_t esp_get_tc(ESPState *s)
209 {
210     uint32_t dmalen;
211 
212     dmalen = s->rregs[ESP_TCLO];
213     dmalen |= s->rregs[ESP_TCMID] << 8;
214     dmalen |= s->rregs[ESP_TCHI] << 16;
215 
216     return dmalen;
217 }
218 
/*
 * Store a new value into the 24-bit transfer counter registers, latching
 * STAT_TC when the counter transitions from non-zero to zero.
 */
static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    bool tc_expired = (esp_get_tc(s) != 0) && (dmalen == 0);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (tc_expired) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}
231 
esp_get_stc(ESPState * s)232 static uint32_t esp_get_stc(ESPState *s)
233 {
234     uint32_t dmalen;
235 
236     dmalen = s->wregs[ESP_TCLO];
237     dmalen |= s->wregs[ESP_TCMID] << 8;
238     dmalen |= s->wregs[ESP_TCHI] << 16;
239 
240     return dmalen;
241 }
242 
esp_pdma_read(ESPState * s)243 static uint8_t esp_pdma_read(ESPState *s)
244 {
245     return esp_fifo_pop(s);
246 }
247 
/*
 * PDMA write: push one byte into the main FIFO, decrementing the transfer
 * counter only while it is non-zero and DRQ is still asserted after the
 * push.
 */
static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t tc = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (tc && s->drq_state) {
        esp_set_tc(s, tc - 1);
    }
}
259 
/*
 * Begin selection of the target encoded in WBUSID. Returns 0 on success,
 * -1 (with a disconnect interrupt raised) if no device answers. Any
 * still-pending request from a previous command is cancelled first.
 */
static int esp_select(ESPState *s)
{
    int target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (s->current_dev) {
        /*
         * Note that we deliberately don't raise the IRQ here: this will be
         * done either in esp_transfer_data() or esp_command_complete()
         */
        return 0;
    }

    /* No such drive */
    s->rregs[ESP_RSTAT] = 0;
    s->rregs[ESP_RINTR] = INTR_DC;
    esp_raise_irq(s);
    return -1;
}
289 
290 static void esp_do_dma(ESPState *s);
291 static void esp_do_nodma(ESPState *s);
292 
/*
 * COMMAND phase: drain the CDB bytes accumulated in cmdfifo and hand them
 * to the SCSI layer as a new request for the selected target/LUN. On a
 * missing LUN a disconnect interrupt is raised instead. If the request
 * expects data, switch straight to the appropriate DATA phase.
 */
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    /* Nothing to do without a CDB or without a selected device */
    if (!cmdlen || !s->current_dev) {
        return;
    }
    fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive: report disconnect */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    /* datalen > 0: device-to-host transfer, < 0: host-to-device */
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
336 
/*
 * MESSAGE OUT phase: consume the IDENTIFY byte (which selects the LUN)
 * from the head of cmdfifo, then discard any further message bytes that
 * precede the CDB. Extended messages are not interpreted.
 */
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = 0;

        if (!fifo8_is_empty(&s->cmdfifo)) {
            message = fifo8_pop(&s->cmdfifo);
        }

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));

        fifo8_drop(&s->cmdfifo, len);
        s->cmdfifo_cdb_offset = 0;
    }
}
355 
/*
 * Process a fully received command: strip the MESSAGE OUT bytes from the
 * front of cmdfifo, then execute the remaining CDB.
 */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    /* do_message_phase() must have consumed the entire message prefix */
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
362 
/*
 * CMD_SELATN: select with ATN. Deferred until DMA is enabled when the
 * command was issued in DMA mode; otherwise select the target and start
 * the MESSAGE OUT phase.
 */
static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Re-run once esp_dma_enable() is called */
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) == 0) {
        esp_set_phase(s, STAT_MO);

        if (s->dma) {
            esp_do_dma(s);
        } else {
            esp_do_nodma(s);
        }
    }
}
382 
/*
 * CMD_SEL: select without ATN. No message phase, so the bus goes straight
 * to COMMAND phase after a successful selection.
 */
static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Re-run once esp_dma_enable() is called */
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) == 0) {
        esp_set_phase(s, STAT_CD);
        s->cmdfifo_cdb_offset = 0;

        if (s->dma) {
            esp_do_dma(s);
        } else {
            esp_do_nodma(s);
        }
    }
}
403 
/*
 * CMD_SELATNS: select with ATN and stop after the first message byte.
 * Enters MESSAGE OUT phase after a successful selection.
 */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Re-run once esp_dma_enable() is called */
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) == 0) {
        esp_set_phase(s, STAT_MO);
        s->cmdfifo_cdb_offset = 0;

        if (s->dma) {
            esp_do_dma(s);
        } else {
            esp_do_nodma(s);
        }
    }
}
424 
/* CMD_PAD: run the transfer engine to pad/drop bytes until TC expires. */
static void handle_pad(ESPState *s)
{
    if (!s->dma) {
        esp_do_nodma(s);
        return;
    }

    esp_do_dma(s);
}
433 
/* CMD_ICCS: send status + message bytes via the active transfer engine. */
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);

    if (!s->dma) {
        esp_do_nodma(s);
        return;
    }

    esp_do_dma(s);
}
444 
esp_cdb_ready(ESPState * s)445 static bool esp_cdb_ready(ESPState *s)
446 {
447     int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
448     const uint8_t *pbuf;
449     uint32_t n;
450     int cdblen;
451 
452     if (len <= 0) {
453         return false;
454     }
455 
456     pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
457     if (n < len) {
458         /*
459          * In normal use the cmdfifo should never wrap, but include this check
460          * to prevent a malicious guest from reading past the end of the
461          * cmdfifo data buffer below
462          */
463         return false;
464     }
465 
466     cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);
467 
468     return cdblen < 0 ? false : (len >= cdblen);
469 }
470 
/*
 * Raise a bus service interrupt when a DMA TI has fully drained: TC has
 * reached zero and the FIFO holds fewer than 2 residual bytes.
 */
static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) != 0 || fifo8_num_used(&s->fifo) >= 2) {
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
478 
/*
 * DMA transfer engine. Moves up to TC bytes per invocation between the
 * DMA channel (dma_memory_read/write callbacks when present, otherwise
 * the PDMA FIFO) and either cmdfifo (message/command phases) or the
 * current SCSI request buffer (data phases), dispatching on the current
 * bus phase and the command latched in ESP_CMD. May recurse to consume
 * data that belongs to the next phase.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: accumulate message bytes into cmdfifo */
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            /* PDMA: take whatever the guest has pushed into the FIFO */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: accumulate CDB bytes into cmdfifo, execute on TC == 0 */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        /* DATA OUT: move bytes from the DMA channel to the request buffer */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        /* DATA IN: move bytes from the request buffer to the DMA channel */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available.  */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        /* STATUS: deliver the status byte, then enter MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: deliver COMMAND COMPLETE (0) and finish the command */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}
728 
/*
 * Non-DMA TI in DATA OUT phase: drain whatever the guest has pushed into
 * the FIFO to the SCSI request buffer. Completes the request when the
 * buffer fills, otherwise raises a bus service interrupt so the guest
 * keeps feeding data.
 */
static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req || s->async_len == 0) {
        /* No active request, or defer until data is available */
        return;
    }

    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        /* Buffer exhausted: hand control back to the SCSI layer */
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
755 
/*
 * Non-DMA transfer engine. Data flows through the main FIFO under direct
 * guest control; dispatch on the current bus phase and the command
 * latched in ESP_CMD. May recurse once per phase change.
 */
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        /* MESSAGE OUT: accumulate message bytes into cmdfifo */
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        /* COMMAND: accumulate CDB bytes, execute once the CDB is complete */
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contain entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        /* DATA IN: feed the request buffer to the guest one byte at a time */
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available.  */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            /* Buffer exhausted: hand control back to the SCSI layer */
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        /* STATUS: push the status byte, then enter MESSAGE IN */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        /* MESSAGE IN: push COMMAND COMPLETE (0) and finish the command */
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}
931 
/*
 * SCSI layer callback: the request has finished. Record the status byte,
 * switch the bus to STATUS phase, raise the interrupts appropriate for
 * the command that was in flight, and release the request. @resid is the
 * unused residual byte count from the SCSI layer.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            /* Command finished with bytes still outstanding */
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        /* Clear the latched TI command */
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
991 
/*
 * SCSI layer callback: the next chunk of request data (@len bytes) is
 * available in the request buffer. On the first chunk of a command,
 * raise the interrupt that was deferred when the data phase was entered;
 * then, if a TI command is latched, resume the matching transfer engine.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        /* First data for this command: raise the deferred interrupt */
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
             s->rregs[ESP_RSEQ] = SEQ_CD;
             break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
             s->rregs[ESP_RINTR] |= INTR_BS;
             s->rregs[ESP_RSEQ] = SEQ_MO;
             break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}
1058 
/*
 * Execute a Transfer Information (TI) command: move data either via the
 * external DMA engine or byte-by-byte through the FIFO.
 */
static void handle_ti(ESPState *s)
{
    /*
     * A DMA transfer was requested but the DMA engine is not yet enabled:
     * park ourselves as the callback and wait for esp_dma_enable().
     */
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (!s->dma) {
        /* Programmed I/O transfer through the FIFO */
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
        return;
    }

    /* DMA transfer: length comes from the transfer counter */
    trace_esp_handle_ti(esp_get_tc(s));
    esp_do_dma(s);
}
1081 
/*
 * Hard reset: return every register and all transfer/DMA state to the
 * power-on defaults.
 */
void esp_hard_reset(ESPState *s)
{
    /* Wipe both the read and write register banks */
    memset(s->wregs, 0, ESP_REGS);
    memset(s->rregs, 0, ESP_REGS);

    /* Clear transfer and DMA bookkeeping */
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    s->dma = 0;
    s->dma_cb = NULL;

    /* Drain the data and command FIFOs */
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);

    /* CFG1 defaults to bus ID 7 after reset */
    s->rregs[ESP_CFG1] = 7;
}
1096 
/*
 * Soft reset: drop both outgoing lines (interrupt and DRQ) and then
 * reset all chip state via esp_hard_reset().
 */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}
1103 
/* Issue a cold reset to every device attached to the SCSI bus */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
1108 
/*
 * GPIO handler for the parent device's reset line: only an asserted
 * level triggers a soft reset; deassertion is ignored.
 */
static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (!level) {
        return;
    }
    esp_soft_reset(s);
}
1115 
/*
 * Decode and execute the command latched in ESP_CMD. For DMA variants
 * the transfer counter is reloaded from the start-count registers first.
 */
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter.  */
        if (esp_get_stc(s) == 0) {
            /* A start count of zero means the maximum of 64k bytes */
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        /* Flush discards any bytes pending in the data FIFO */
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        /* CFG1_RESREPT suppresses the reset-detected interrupt */
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        /* Initiator command complete sequence: send status + message */
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        /* Message accepted: report disconnect, clear sequence and FIFO */
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
1202 
/*
 * Read one ESP register. Some reads have side effects: reading the FIFO
 * pops a byte, reading RINTR clears interrupt and most status bits.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        /* Reading the FIFO register pops the next queued byte */
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
     case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
1251 
/*
 * Write one ESP register. ESP_CMD triggers command execution, writing
 * the transfer-count registers clears the TC status flag, and a FIFO
 * write may advance a non-DMA transfer already in progress.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Loading a new transfer count clears the count-zero flag */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        /* The byte is silently dropped if the FIFO is already full */
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Configuration registers read back through the rregs bank */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    /* Mirror the accepted value into the write register bank */
    s->wregs[saddr] = val;
}
1288 
/*
 * Access filter for the register window: byte accesses are always
 * valid; 32-bit accesses are accepted for writes only.
 */
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    if (size == 1) {
        return true;
    }
    return is_write && size == 4;
}
1295 
esp_is_before_version_5(void * opaque,int version_id)1296 static bool esp_is_before_version_5(void *opaque, int version_id)
1297 {
1298     ESPState *s = ESP(opaque);
1299 
1300     version_id = MIN(version_id, s->mig_version_id);
1301     return version_id < 5;
1302 }
1303 
esp_is_version_5(void * opaque,int version_id)1304 static bool esp_is_version_5(void *opaque, int version_id)
1305 {
1306     ESPState *s = ESP(opaque);
1307 
1308     version_id = MIN(version_id, s->mig_version_id);
1309     return version_id >= 5;
1310 }
1311 
esp_is_version_6(void * opaque,int version_id)1312 static bool esp_is_version_6(void *opaque, int version_id)
1313 {
1314     ESPState *s = ESP(opaque);
1315 
1316     version_id = MIN(version_id, s->mig_version_id);
1317     return version_id >= 6;
1318 }
1319 
esp_is_between_version_5_and_6(void * opaque,int version_id)1320 static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
1321 {
1322     ESPState *s = ESP(opaque);
1323 
1324     version_id = MIN(version_id, s->mig_version_id);
1325     return version_id >= 5 && version_id <= 6;
1326 }
1327 
/*
 * Pre-save hook for the wrapping device: record the vmstate version in
 * the embedded ESP child so the esp_is_version_* field tests can pick
 * the correct set of fields for the outgoing stream.
 */
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1336 
/*
 * Post-load fixup: for pre-version-5 streams, convert the legacy flat
 * migration buffers (with separate read/write pointers) into the
 * current Fifo8-based representation.
 */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    /* The source may have sent an older stream version than ours */
    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
1362 
/*
 * Migration state for the ESP core. The mig_* fields carry the legacy
 * (pre-version-5) layout and are converted by esp_post_load(); version
 * 5+ streams migrate the Fifo8 state directly. The field tests gate
 * each field on the effective stream version.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /* mig_cmdbuf is split into two versioned sub-ranges (v0: first
         * 16 bytes, v4: the remainder) for wire compatibility */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};
1400 
/*
 * MMIO write to the register window; registers are spaced
 * 1 << it_shift bytes apart.
 */
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    esp_reg_write(s, addr >> sysbus->it_shift, val);
}
1411 
/*
 * MMIO read from the register window; registers are spaced
 * 1 << it_shift bytes apart.
 */
static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    return esp_reg_read(s, addr >> sysbus->it_shift);
}
1422 
/*
 * Register window MMIO ops: esp_mem_accepts() restricts guest accesses
 * to bytes plus 32-bit writes.
 */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1429 
/*
 * Pseudo-DMA write: push one or two bytes (high byte first for word
 * accesses) into the chip, then let the DMA state machine run.
 */
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    if (size == 2) {
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
    } else if (size == 1) {
        esp_pdma_write(s, val);
    }
    esp_do_dma(s);
}
1449 
/*
 * Pseudo-DMA read: fetch one or two bytes (assembled high byte first
 * for word accesses), then let the DMA state machine run.
 */
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    if (size == 2) {
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
    } else if (size == 1) {
        val = esp_pdma_read(s);
    }
    esp_do_dma(s);
    return val;
}
1471 
/*
 * Migration helper: re-attach an in-flight SCSI request to the ESP
 * state after load. Takes a reference that the completion path drops.
 */
static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}
1480 
/*
 * Pseudo-DMA MMIO ops: guests may issue 1-4 byte accesses, but the
 * implementation handles at most 2 bytes at a time (wider accesses are
 * split by the memory core).
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1490 
/* SCSI bus callbacks wired into the ESP core (no tagged queueing) */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1501 
sysbus_esp_gpio_demux(void * opaque,int irq,int level)1502 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1503 {
1504     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1505     ESPState *s = ESP(&sysbus->esp);
1506 
1507     switch (irq) {
1508     case 0:
1509         parent_esp_reset(s, irq, level);
1510         break;
1511     case 1:
1512         esp_dma_enable(s, irq, level);
1513         break;
1514     }
1515 }
1516 
/*
 * Realize the sysbus wrapper: realize the embedded ESP core, then
 * export its IRQ/DRQ lines, the register and pseudo-DMA MMIO regions,
 * the two GPIO inputs (reset, DMA enable) and the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    /* The board must have configured the register spacing */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
1543 
/* Legacy DeviceClass reset handler: hard-reset the embedded ESP core */
static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}
1551 
/* Instance init: embed the ESP core as a QOM child named "esp" */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
1558 
/*
 * Outer migration wrapper: sends the ESP vmstate version (stream v2+)
 * ahead of the embedded ESP state so the field tests and
 * esp_post_load() can adapt to older sources.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1570 
/* Class init for the sysbus wrapper: realize, reset and migration hooks */
static void sysbus_esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    device_class_set_legacy_reset(dc, sysbus_esp_hard_reset);
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1580 
/* Instance finalize: release the FIFO backing storage */
static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}
1588 
/* Instance init: allocate the data and command FIFOs */
static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}
1596 
/* Class init for the inner ESP core device */
static void esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1605 
/* QOM type registration: the sysbus wrapper and the internal ESP core */
static const TypeInfo esp_info_types[] = {
    {
        .name          = TYPE_SYSBUS_ESP,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init    = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)
1625