/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

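/*
 * IRQ/DRQ helpers: STAT_INT in the status register mirrors the level of
 * the interrupt line, so the line is only toggled on a real state change.
 * The separate DRQ line (irq_data) is used for the PDMA handshake with
 * the host CPU.
 */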
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

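/*
 * The transfer counter is a 24-bit value split across the TCLO/TCMID/TCHI
 * registers; for example TCHI=0x00, TCMID=0x02, TCLO=0x00 encodes a
 * 0x200 byte transfer.  esp_get_tc()/esp_set_tc() operate on the current
 * (read-back) counter, while esp_get_stc() returns the start value last
 * written by the guest.
 */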
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

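/*
 * PDMA (pseudo-DMA) bookkeeping: when no dma_memory_read/write callbacks
 * are provided, the host CPU moves the data itself through the "esp-pdma"
 * MMIO region.  set_pdma() records which internal buffer is being
 * transferred (pdma_origin) together with its start/current offsets and
 * the remaining length.
 */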
static void set_pdma(ESPState *s, enum pdma_origin_id origin,
                     uint32_t index, uint32_t len)
{
    s->pdma_origin = origin;
    s->pdma_start = index;
    s->pdma_cur = index;
    s->pdma_len = len;
}

static uint8_t *get_pdma_buf(ESPState *s)
{
    switch (s->pdma_origin) {
    case PDMA:
        return s->pdma_buf;
    case TI:
        return s->ti_buf;
    case CMD:
        return s->cmdbuf;
    case ASYNC:
        return s->async_buf;
    }
    return NULL;
}

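/*
 * Start of a selection: look up the target from the bus ID register and
 * reset the transfer buffer state.  Returns -1 (and raises a disconnect
 * interrupt) if no device answers at that target ID.
 */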
static int get_cmd_cb(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }
    return 0;
}

static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = esp_get_tc(s);
        if (dmalen > buflen) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            memcpy(s->pdma_buf, buf, dmalen);
            set_pdma(s, PDMA, 0, dmalen);
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    if (get_cmd_cb(s) < 0) {
        return 0;
    }
    return dmalen;
}

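/*
 * Hand a fully assembled CDB to the SCSI layer.  The LUN comes from the
 * low bits of the IDENTIFY message byte (busid); the data phase reported
 * to the guest (DATA IN vs DATA OUT) follows the sign of the expected
 * transfer length returned by scsi_req_enqueue().
 */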
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

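/*
 * Selection commands (with ATN, without ATN, and with ATN + stop): the
 * *_pdma_cb variants complete the command once the host has pushed the
 * CDB bytes through the PDMA region, while the handle_* functions cover
 * the case where the bytes are already available via real DMA or the
 * FIFO.
 */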
static void satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_cmd(s, get_pdma_buf(s) + s->pdma_start);
    }
}

static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_cmd(s, buf);
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    s->cmdlen = s->pdma_cur - s->pdma_start;
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

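/*
 * CMD_ICCS response: return the status byte followed by a zero message
 * byte to the initiator, either through DMA/PDMA or by leaving the two
 * bytes in the FIFO for PIO reads.
 */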
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            set_pdma(s, TI, 0, 2);
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}

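/*
 * PDMA completion callback for DMA transfers: account for the bytes the
 * host actually moved (pdma_cur - pdma_start) and either kick the SCSI
 * layer for more data or finish the DMA operation, mirroring the tail of
 * esp_do_dma().
 */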
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len = s->pdma_cur - s->pdma_start;
    if (s->do_cmd) {
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}

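/*
 * Main DMA engine.  Command bytes are accumulated into cmdbuf and then
 * executed; data bytes are copied between the SCSI layer's async_buf and
 * guest memory.  With dma_memory_read/write callbacks installed the copy
 * happens immediately; otherwise the transfer is parked in the PDMA state
 * and completed later from do_dma_pdma_cb() once the host has moved the
 * bytes through the PDMA region.
 */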
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    len = s->dma_left;
    if (s->do_cmd) {
        /*
         * Command phase: the do_cmd case is only reached from handle_ti(),
         * since esp_transfer_data() asserts !do_cmd; the assert() below
         * keeps the DMA read within the bounds of cmdbuf.
         */
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        } else {
            set_pdma(s, CMD, s->cmdlen, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}

static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

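/*
 * SCSI-layer completion callback (esp_scsi_info.complete).  If the guest
 * has not yet acknowledged the previous interrupt, the completion is
 * deferred and reported when ESP_RINTR is read (see esp_reg_read()).
 */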
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /*
         * Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = req->status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, req->status);
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
    }
}

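/*
 * CMD_TI (Transfer Information): transfer up to the programmed count,
 * handling both the command-out phase (do_cmd set) and data transfers
 * for the current request via esp_do_dma().
 */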
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = esp_get_tc(s);

    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

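/*
 * Register reads can have side effects: reading the FIFO consumes a byte
 * from ti_buf, and reading ESP_RINTR returns and clears the latched
 * interrupt bits, updates the sequence step, lowers the interrupt line
 * and may deliver a deferred command completion.  ESP_TCHI returns the
 * chip ID until the register has been written at least once.
 */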
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

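/*
 * Register writes.  Writing a command with CMD_DMA set reloads the
 * transfer counter from the value last written to TCLO/TCMID/TCHI
 * (a programmed count of 0 selects the maximum, 0x10000).  An
 * illustrative guest sequence for a DMA data transfer (a sketch only,
 * exact driver behaviour varies) is:
 *
 *   write ESP_TCLO/ESP_TCMID (and ESP_TCHI for counts above 64 KiB)
 *   write ESP_CMD = CMD_TI | CMD_DMA   -> handle_ti() starts the transfer
 */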
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /*s->ti_size = 0;*/
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = opaque;
    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}

static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(pdma_buf, ESPState),
        VMSTATE_INT32(pdma_origin, ESPState),
        VMSTATE_UINT32(pdma_len, ESPState),
        VMSTATE_UINT32(pdma_start, ESPState),
        VMSTATE_UINT32(pdma_cur, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .pre_save = esp_pre_save,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

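/*
 * "esp-pdma" MMIO region: when no DMA callbacks are installed the host CPU
 * moves the pending transfer itself, one or two bytes per access.  Once
 * the programmed length is exhausted, DRQ is dropped and the deferred
 * pdma_cb continues the command.
 */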
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t dmalen;
    uint8_t *buf = get_pdma_buf(s);

    trace_esp_pdma_write(size);

    dmalen = esp_get_tc(s);
    if (dmalen == 0 || s->pdma_len == 0) {
        return;
    }
    switch (size) {
    case 1:
        buf[s->pdma_cur++] = val;
        s->pdma_len--;
        dmalen--;
        break;
    case 2:
        buf[s->pdma_cur++] = val >> 8;
        buf[s->pdma_cur++] = val;
        s->pdma_len -= 2;
        dmalen -= 2;
        break;
    }
    esp_set_tc(s, dmalen);
    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint8_t *buf = get_pdma_buf(s);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    if (s->pdma_len == 0) {
        return 0;
    }
    switch (size) {
    case 1:
        val = buf[s->pdma_cur++];
        s->pdma_len--;
        break;
    case 2:
        val = buf[s->pdma_cur++];
        val = (val << 8) | buf[s->pdma_cur++];
        s->pdma_len -= 2;
        break;
    }

    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
    return val;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(opaque, irq, level);
        break;
    }
}

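/*
 * Board wiring (a minimal sketch only; real machines such as sun4m route
 * this through their DMA controller, and the exact setup of it_shift,
 * base addresses and IRQ lines differs per board):
 *
 *   SysBusESPState *sysbus = SYSBUS_ESP(qdev_new(TYPE_SYSBUS_ESP));
 *   sysbus->it_shift = 2;                                // register stride
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(sysbus), &error_fatal);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(sysbus), 0, regs_base);  // "esp-regs"
 *   sysbus_mmio_map(SYS_BUS_DEVICE(sysbus), 1, pdma_base);  // "esp-pdma"
 *   sysbus_connect_irq(SYS_BUS_DEVICE(sysbus), 0, irq);     // interrupt
 *   sysbus_connect_irq(SYS_BUS_DEVICE(sysbus), 1, drq);     // DRQ
 *
 * GPIO input 0 resets the chip, input 1 gates DMA (see the demux above).
 */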
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 2);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)