xref: /qemu/hw/scsi/esp.c (revision 84fbefedfecafba2b339d17c9133225467b90c2e)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On Macintosh Quadra it is a NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
/* Assert the DMA request line (used by the PDMA/pseudo-DMA path). */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
}
67 
/* Deassert the DMA request line once a PDMA transfer has drained. */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
}
72 
73 void esp_dma_enable(ESPState *s, int irq, int level)
74 {
75     if (level) {
76         s->dma_enabled = 1;
77         trace_esp_dma_enable();
78         if (s->dma_cb) {
79             s->dma_cb(s);
80             s->dma_cb = NULL;
81         }
82     } else {
83         trace_esp_dma_disable();
84         s->dma_enabled = 0;
85     }
86 }
87 
/*
 * SCSI-layer cancel callback: drop our reference to the request if it is
 * the one currently in flight, and forget the selected device.
 */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
98 
/*
 * Arm a pseudo-DMA transfer: record which buffer the PDMA MMIO region
 * operates on (origin), the starting offset within it, and the number of
 * bytes expected.  pdma_cur advances as the guest accesses the region.
 */
static void set_pdma(ESPState *s, enum pdma_origin_id origin,
                     uint32_t index, uint32_t len)
{
    s->pdma_origin = origin;
    s->pdma_start = index;
    s->pdma_cur = index;
    s->pdma_len = len;
}
107 
/*
 * Resolve the currently armed PDMA origin to the backing byte buffer.
 * Returns NULL only if pdma_origin holds a value outside the enum (should
 * not happen in practice).
 */
static uint8_t *get_pdma_buf(ESPState *s)
{
    switch (s->pdma_origin) {
    case PDMA:
        return s->pdma_buf;
    case TI:
        return s->ti_buf;
    case CMD:
        return s->cmdbuf;
    case ASYNC:
        return s->async_buf;
    }
    return NULL;
}
122 
/*
 * Common selection prologue: reset the FIFO, cancel any command still in
 * flight, and look up the target addressed by the bus-ID register.
 *
 * Returns 0 on success; -1 (after raising a disconnect interrupt) if no
 * device answers at that target ID.
 */
static int get_cmd_cb(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }
    return 0;
}
150 
/*
 * Fetch the command bytes for a selection into buf.
 *
 * DMA: length comes from the transfer counter registers; with a
 * dma_memory_read callback the bytes are read synchronously, otherwise a
 * PDMA transfer is armed and 0 is returned (bytes arrive later through the
 * PDMA MMIO region and s->pdma_cb).
 * Non-DMA: the bytes are taken from the FIFO (ti_buf).
 *
 * Returns the number of bytes fetched, or 0 if deferred or failed.
 */
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            /*
             * NOTE(review): this copies the caller's buffer (still
             * uninitialised at this point) into pdma_buf; the real command
             * bytes are filled in afterwards by guest PDMA writes.  The
             * copy direction looks suspicious — confirm it is intentional
             * seeding rather than a swapped src/dst.
             */
            memcpy(s->pdma_buf, buf, dmalen);
            set_pdma(s, PDMA, 0, dmalen);
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        /* Presumably extracts the LUN from the message byte — verify. */
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    if (get_cmd_cb(s) < 0) {
        return 0;
    }
    return dmalen;
}
187 
/*
 * Hand a CDB (buf) to the SCSI layer for the LUN encoded in busid's low
 * three bits, then report bus service / function complete and set the data
 * phase (DI for reads, DO for writes) when the command transfers data.
 */
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    /* Negative datalen = transfer to the device; positive = from it. */
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
215 
216 static void do_cmd(ESPState *s, uint8_t *buf)
217 {
218     uint8_t busid = buf[0];
219 
220     do_busid_cmd(s, &buf[1], busid);
221 }
222 
/*
 * PDMA completion for "select with ATN": once the command bytes have been
 * pushed through the PDMA region, run the selection prologue and dispatch
 * the command if any bytes actually arrived.
 */
static void satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_cmd(s, get_pdma_buf(s) + s->pdma_start);
    }
}
232 
233 static void handle_satn(ESPState *s)
234 {
235     uint8_t buf[32];
236     int len;
237 
238     if (s->dma && !s->dma_enabled) {
239         s->dma_cb = handle_satn;
240         return;
241     }
242     s->pdma_cb = satn_pdma_cb;
243     len = get_cmd(s, buf, sizeof(buf));
244     if (len) {
245         do_cmd(s, buf);
246     }
247 }
248 
/*
 * PDMA completion for "select without ATN": no message-out byte precedes
 * the CDB, so dispatch with busid 0 (LUN 0).
 */
static void s_without_satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
    }
}
258 
259 static void handle_s_without_atn(ESPState *s)
260 {
261     uint8_t buf[32];
262     int len;
263 
264     if (s->dma && !s->dma_enabled) {
265         s->dma_cb = handle_s_without_atn;
266         return;
267     }
268     s->pdma_cb = s_without_satn_pdma_cb;
269     len = get_cmd(s, buf, sizeof(buf));
270     if (len) {
271         do_busid_cmd(s, buf, 0);
272     }
273 }
274 
/*
 * PDMA completion for "select with ATN and stop": latch the received
 * command bytes (do_cmd flag) without executing them yet — execution
 * happens on the subsequent transfer-information command.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    s->cmdlen = s->pdma_cur - s->pdma_start;
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
290 
/*
 * CMD_SELATNS: select with ATN, read the command into cmdbuf, then stop
 * (set do_cmd) so the guest can inspect/modify before issuing CMD_TI.
 * Deferred when DMA is requested but currently gated off.
 */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
308 
/*
 * PDMA completion for write_response(): the status + message pair has been
 * drained by the guest, so report terminal count / status phase and
 * interrupt.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
316 
/*
 * CMD_ICCS helper: deliver the two-byte status/message-in pair to the
 * initiator, either via real DMA, via PDMA (deferring the completion
 * interrupt to write_response_pdma_cb), or through the FIFO.
 */
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;    /* message byte: command complete */
    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            set_pdma(s, TI, 0, 2);
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        /* PIO: leave both bytes in the FIFO for the guest to read. */
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
342 
/*
 * Signal DMA completion: terminal count reached, bus-service interrupt,
 * sequence/FIFO flags and transfer counter cleared.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}
354 
/*
 * PDMA completion for transfers armed by esp_do_dma().  Performs the same
 * bookkeeping as the tail of esp_do_dma() once the guest has moved
 * (pdma_cur - pdma_start) bytes through the PDMA region.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    /* Negative ti_size means the data flows from memory to the device. */
    int to_device = (s->ti_size < 0);
    int len = s->pdma_cur - s->pdma_start;
    if (s->do_cmd) {
        /* The PDMA bytes were command bytes: dispatch the CDB now. */
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}
389 
/*
 * Run (or arm) one chunk of a DMA transfer.
 *
 * do_cmd set: the pending bytes are command bytes — append them to cmdbuf
 * and dispatch the CDB (or arm PDMA and dispatch from do_dma_pdma_cb).
 * Otherwise: move up to dma_left bytes between guest memory and the SCSI
 * layer's async buffer, deferring if the buffer is not yet available.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    len = s->dma_left;
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd set (see the assert())
         */
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        } else {
            set_pdma(s, CMD, s->cmdlen, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    /* Negative ti_size means memory-to-device. */
    to_device = (s->ti_size < 0);
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}
469 
/*
 * Finish the current SCSI command: record its status, enter the status
 * phase, raise the DMA-done interrupt, and release the request.
 */
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        /* The transfer ended with bytes still outstanding. */
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
491 
/*
 * SCSI-layer completion callback.  If an interrupt is still pending the
 * completion is deferred and delivered when the guest reads ESP_RINTR
 * (see esp_reg_read).
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /*
         * Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = req->status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, req->status);
}
508 
/*
 * SCSI-layer data callback: a new chunk of the request's buffer is ready.
 * Resume any in-progress DMA, or deliver the deferred completion interrupt
 * if the DMA transfer already covered everything.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
    }
}
527 
/*
 * CMD_TI (transfer information): start the data or command transfer whose
 * length is bounded by the transfer counter and the bytes still owed
 * (ti_size / cmdbuf).  Deferred when DMA is requested but gated off.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        /* A zero transfer counter means the maximum (64 KiB). */
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    /* Clamp to the number of bytes actually pending for this phase. */
    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        /* PIO path: the command was latched earlier by SELATNS. */
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}
565 
/*
 * Full chip reset: clear all registers and transfer state.  CFG1 retains
 * the default bus ID of 7 (the controller's own SCSI ID).
 */
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}
580 
/* Guest-visible reset: drop both output lines, then reset chip state. */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
587 
588 static void parent_esp_reset(ESPState *s, int irq, int level)
589 {
590     if (level) {
591         esp_soft_reset(s);
592     }
593 }
594 
/*
 * Register read handler.  FIFO reads pop ti_buf; RINTR reads clear the
 * interrupt state and may deliver a deferred command completion; TCHI
 * returns the chip ID until it is first written.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;

    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            /* FIFO drained: rewind both pointers. */
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            /* Deliver the completion that esp_command_complete() parked. */
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        return old_val;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            return s->chip_id;
        }
        /* fall through */
    default:
        break;
    }
    return s->rregs[saddr];
}
640 
/*
 * Register write handler.  Transfer-counter writes clear STAT_TC; FIFO
 * writes feed either cmdbuf (during SELATNS command load) or ti_buf;
 * ESP_CMD decodes and executes the command byte.  The written value is
 * mirrored into wregs at the end for all accepted registers.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /*s->ti_size = 0;*/
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            /* CFG1_RESREPT suppresses the reset interrupt report. */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
768 
769 static bool esp_mem_accepts(void *opaque, hwaddr addr,
770                             unsigned size, bool is_write,
771                             MemTxAttrs attrs)
772 {
773     return (size == 1) || (is_write && size == 4);
774 }
775 
776 static bool esp_pdma_needed(void *opaque)
777 {
778     ESPState *s = opaque;
779     return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
780            s->dma_enabled;
781 }
782 
/* Migration subsection carrying in-flight pseudo-DMA transfer state. */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(pdma_buf, ESPState),
        VMSTATE_INT32(pdma_origin, ESPState),
        VMSTATE_UINT32(pdma_len, ESPState),
        VMSTATE_UINT32(pdma_start, ESPState),
        VMSTATE_UINT32(pdma_cur, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
797 
/*
 * Core ESP migration state.  The cmdbuf is split into a 16-byte partial
 * buffer (v3 compatibility) plus the remainder from version 4 on.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 4,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};
825 
826 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
827                                  uint64_t val, unsigned int size)
828 {
829     SysBusESPState *sysbus = opaque;
830     uint32_t saddr;
831 
832     saddr = addr >> sysbus->it_shift;
833     esp_reg_write(&sysbus->esp, saddr, val);
834 }
835 
836 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
837                                     unsigned int size)
838 {
839     SysBusESPState *sysbus = opaque;
840     uint32_t saddr;
841 
842     saddr = addr >> sysbus->it_shift;
843     return esp_reg_read(&sysbus->esp, saddr);
844 }
845 
/* MemoryRegionOps for the register bank; access sizes gated by accepts. */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
852 
853 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
854                                   uint64_t val, unsigned int size)
855 {
856     SysBusESPState *sysbus = opaque;
857     ESPState *s = &sysbus->esp;
858     uint32_t dmalen;
859     uint8_t *buf = get_pdma_buf(s);
860 
861     dmalen = s->rregs[ESP_TCLO];
862     dmalen |= s->rregs[ESP_TCMID] << 8;
863     dmalen |= s->rregs[ESP_TCHI] << 16;
864     if (dmalen == 0 || s->pdma_len == 0) {
865         return;
866     }
867     switch (size) {
868     case 1:
869         buf[s->pdma_cur++] = val;
870         s->pdma_len--;
871         dmalen--;
872         break;
873     case 2:
874         buf[s->pdma_cur++] = val >> 8;
875         buf[s->pdma_cur++] = val;
876         s->pdma_len -= 2;
877         dmalen -= 2;
878         break;
879     }
880     s->rregs[ESP_TCLO] = dmalen & 0xff;
881     s->rregs[ESP_TCMID] = dmalen >> 8;
882     s->rregs[ESP_TCHI] = dmalen >> 16;
883     if (s->pdma_len == 0 && s->pdma_cb) {
884         esp_lower_drq(s);
885         s->pdma_cb(s);
886         s->pdma_cb = NULL;
887     }
888 }
889 
890 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
891                                      unsigned int size)
892 {
893     SysBusESPState *sysbus = opaque;
894     ESPState *s = &sysbus->esp;
895     uint8_t *buf = get_pdma_buf(s);
896     uint64_t val = 0;
897 
898     if (s->pdma_len == 0) {
899         return 0;
900     }
901     switch (size) {
902     case 1:
903         val = buf[s->pdma_cur++];
904         s->pdma_len--;
905         break;
906     case 2:
907         val = buf[s->pdma_cur++];
908         val = (val << 8) | buf[s->pdma_cur++];
909         s->pdma_len -= 2;
910         break;
911     }
912 
913     if (s->pdma_len == 0 && s->pdma_cb) {
914         esp_lower_drq(s);
915         s->pdma_cb(s);
916         s->pdma_cb = NULL;
917     }
918     return val;
919 }
920 
/* MemoryRegionOps for the pseudo-DMA port: 8- or 16-bit accesses only. */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 2,
};
928 
/* SCSI bus callbacks wiring the generic SCSI layer back into the ESP. */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
938 
939 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
940 {
941     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
942     ESPState *s = &sysbus->esp;
943 
944     switch (irq) {
945     case 0:
946         parent_esp_reset(s, irq, level);
947         break;
948     case 1:
949         esp_dma_enable(opaque, irq, level);
950         break;
951     }
952 }
953 
/*
 * Realize: export IRQ + DRQ lines, map the register bank (scaled by
 * it_shift) and the 2-byte PDMA port, wire the GPIO demux, and create the
 * SCSI bus.  it_shift must have been set by the board code.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = &sysbus->esp;

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 2);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}
976 
977 static void sysbus_esp_hard_reset(DeviceState *dev)
978 {
979     SysBusESPState *sysbus = SYSBUS_ESP(dev);
980     esp_hard_reset(&sysbus->esp);
981 }
982 
/* Migration wrapper: the sysbus device state is just the embedded ESP. */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
992 
/* Class init: hook up realize, reset and migration for the sysbus ESP. */
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1002 
/* QOM type registration data for the sysbus-attached ESP device. */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1009 
/* Register the QOM type(s) provided by this file. */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
}

type_init(esp_register_types)
1016