xref: /qemu/hw/scsi/esp.c (revision 960ebfd94e8f6ea879472e7efb84b1704f685d39)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of the STP2000 (Master I/O) chip,
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On the Macintosh Quadra it is an NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
63 static void esp_raise_drq(ESPState *s)
64 {
65     qemu_irq_raise(s->irq_data);
66     trace_esp_raise_drq();
67 }
68 
69 static void esp_lower_drq(ESPState *s)
70 {
71     qemu_irq_lower(s->irq_data);
72     trace_esp_lower_drq();
73 }
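
/*
 * Note on the two output lines: s->irq mirrors the STAT_INT bit in ESP_RSTAT
 * (see esp_raise_irq()/esp_lower_irq() above), while s->irq_data is the DRQ
 * line used for the pseudo-DMA (PDMA) handshake.  DRQ only comes into play
 * when a board installs no dma_memory_read/dma_memory_write callbacks (see
 * esp_pdma_needed() below); data is then moved through the "esp-pdma" MMIO
 * region instead.
 */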
74 
75 void esp_dma_enable(ESPState *s, int irq, int level)
76 {
77     if (level) {
78         s->dma_enabled = 1;
79         trace_esp_dma_enable();
80         if (s->dma_cb) {
81             s->dma_cb(s);
82             s->dma_cb = NULL;
83         }
84     } else {
85         trace_esp_dma_disable();
86         s->dma_enabled = 0;
87     }
88 }
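
/*
 * A minimal sketch (not part of this file) of how a board-level DMA
 * controller could drive esp_dma_enable() through the second gpio-in line
 * exposed by the sysbus device (see sysbus_esp_gpio_demux() below, where
 * line 0 is reset and line 1 is DMA enable):
 *
 *     qemu_irq dma_enable = qdev_get_gpio_in(esp_dev, 1);
 *     qemu_set_irq(dma_enable, 1);    // enable DMA, replay any deferred dma_cb
 *     qemu_set_irq(dma_enable, 0);    // disable; new DMA commands are deferred
 *
 * "esp_dev" is a hypothetical DeviceState * for a TYPE_SYSBUS_ESP instance.
 */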
89 
90 void esp_request_cancelled(SCSIRequest *req)
91 {
92     ESPState *s = req->hba_private;
93 
94     if (req == s->current_req) {
95         scsi_req_unref(s->current_req);
96         s->current_req = NULL;
97         s->current_dev = NULL;
98     }
99 }
100 
101 static void set_pdma(ESPState *s, enum pdma_origin_id origin,
102                      uint32_t index, uint32_t len)
103 {
104     s->pdma_origin = origin;
105     s->pdma_start = index;
106     s->pdma_cur = index;
107     s->pdma_len = len;
108 }
109 
110 static uint8_t *get_pdma_buf(ESPState *s)
111 {
112     switch (s->pdma_origin) {
113     case PDMA:
114         return s->pdma_buf;
115     case TI:
116         return s->ti_buf;
117     case CMD:
118         return s->cmdbuf;
119     case ASYNC:
120         return s->async_buf;
121     }
122     return NULL;
123 }
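
/*
 * pdma_origin selects which buffer the "esp-pdma" MMIO handlers move data
 * through while DRQ is asserted:
 *   PDMA  - s->pdma_buf, used by get_cmd() when no dma_memory_read callback
 *           is installed
 *   TI    - s->ti_buf, used by write_response() for the status/message bytes
 *   CMD   - s->cmdbuf, used by esp_do_dma() while accumulating a CDB
 *   ASYNC - s->async_buf, the SCSI layer's data buffer
 */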
124 
125 static int get_cmd_cb(ESPState *s)
126 {
127     int target;
128 
129     target = s->wregs[ESP_WBUSID] & BUSID_DID;
130 
131     s->ti_size = 0;
132     s->ti_rptr = 0;
133     s->ti_wptr = 0;
134 
135     if (s->current_req) {
136         /* Started a new command before the old one finished.  Cancel it.  */
137         scsi_req_cancel(s->current_req);
138         s->async_len = 0;
139     }
140 
141     s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
142     if (!s->current_dev) {
143         /* No such drive */
144         s->rregs[ESP_RSTAT] = 0;
145         s->rregs[ESP_RINTR] = INTR_DC;
146         s->rregs[ESP_RSEQ] = SEQ_0;
147         esp_raise_irq(s);
148         return -1;
149     }
150     return 0;
151 }
152 
153 static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
154 {
155     uint32_t dmalen;
156     int target;
157 
158     target = s->wregs[ESP_WBUSID] & BUSID_DID;
159     if (s->dma) {
160         dmalen = s->rregs[ESP_TCLO];
161         dmalen |= s->rregs[ESP_TCMID] << 8;
162         dmalen |= s->rregs[ESP_TCHI] << 16;
163         if (dmalen > buflen) {
164             return 0;
165         }
166         if (s->dma_memory_read) {
167             s->dma_memory_read(s->dma_opaque, buf, dmalen);
168         } else {
169             memcpy(s->pdma_buf, buf, dmalen);
170             set_pdma(s, PDMA, 0, dmalen);
171             esp_raise_drq(s);
172             return 0;
173         }
174     } else {
175         dmalen = s->ti_size;
176         if (dmalen > TI_BUFSZ) {
177             return 0;
178         }
179         memcpy(buf, s->ti_buf, dmalen);
180         buf[0] = buf[2] >> 5;
181     }
182     trace_esp_get_cmd(dmalen, target);
183 
184     if (get_cmd_cb(s) < 0) {
185         return 0;
186     }
187     return dmalen;
188 }
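
/*
 * get_cmd() fetches the CDB in one of three ways: through the board's
 * dma_memory_read callback (length taken from the 24-bit transfer counter in
 * ESP_TCLO/ESP_TCMID/ESP_TCHI), through PDMA (DRQ is raised and the bytes
 * arrive later via sysbus_esp_pdma_write(), finishing in the caller's
 * pdma_cb), or, for non-DMA selection, from the FIFO bytes already queued in
 * s->ti_buf.
 */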
189 
190 static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
191 {
192     int32_t datalen;
193     int lun;
194     SCSIDevice *current_lun;
195 
196     trace_esp_do_busid_cmd(busid);
197     lun = busid & 7;
198     current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
199     s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
200     datalen = scsi_req_enqueue(s->current_req);
201     s->ti_size = datalen;
202     if (datalen != 0) {
203         s->rregs[ESP_RSTAT] = STAT_TC;
204         s->dma_left = 0;
205         s->dma_counter = 0;
206         if (datalen > 0) {
207             s->rregs[ESP_RSTAT] |= STAT_DI;
208         } else {
209             s->rregs[ESP_RSTAT] |= STAT_DO;
210         }
211         scsi_req_continue(s->current_req);
212     }
213     s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
214     s->rregs[ESP_RSEQ] = SEQ_CD;
215     esp_raise_irq(s);
216 }
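
/*
 * Sign convention for ti_size: a positive length from scsi_req_enqueue()
 * means the target will send data to the initiator (data-in, STAT_DI); a
 * negative length means the initiator must send data to the target
 * (data-out, STAT_DO).  The DMA helpers below rely on this:
 * to_device == (ti_size < 0).
 */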
217 
218 static void do_cmd(ESPState *s, uint8_t *buf)
219 {
220     uint8_t busid = buf[0];
221 
222     do_busid_cmd(s, &buf[1], busid);
223 }
224 
225 static void satn_pdma_cb(ESPState *s)
226 {
227     if (get_cmd_cb(s) < 0) {
228         return;
229     }
230     if (s->pdma_cur != s->pdma_start) {
231         do_cmd(s, get_pdma_buf(s) + s->pdma_start);
232     }
233 }
234 
235 static void handle_satn(ESPState *s)
236 {
237     uint8_t buf[32];
238     int len;
239 
240     if (s->dma && !s->dma_enabled) {
241         s->dma_cb = handle_satn;
242         return;
243     }
244     s->pdma_cb = satn_pdma_cb;
245     len = get_cmd(s, buf, sizeof(buf));
246     if (len) {
247         do_cmd(s, buf);
248     }
249 }
250 
251 static void s_without_satn_pdma_cb(ESPState *s)
252 {
253     if (get_cmd_cb(s) < 0) {
254         return;
255     }
256     if (s->pdma_cur != s->pdma_start) {
257         do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
258     }
259 }
260 
261 static void handle_s_without_atn(ESPState *s)
262 {
263     uint8_t buf[32];
264     int len;
265 
266     if (s->dma && !s->dma_enabled) {
267         s->dma_cb = handle_s_without_atn;
268         return;
269     }
270     s->pdma_cb = s_without_satn_pdma_cb;
271     len = get_cmd(s, buf, sizeof(buf));
272     if (len) {
273         do_busid_cmd(s, buf, 0);
274     }
275 }
276 
277 static void satn_stop_pdma_cb(ESPState *s)
278 {
279     if (get_cmd_cb(s) < 0) {
280         return;
281     }
282     s->cmdlen = s->pdma_cur - s->pdma_start;
283     if (s->cmdlen) {
284         trace_esp_handle_satn_stop(s->cmdlen);
285         s->do_cmd = 1;
286         s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
287         s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
288         s->rregs[ESP_RSEQ] = SEQ_CD;
289         esp_raise_irq(s);
290     }
291 }
292 
293 static void handle_satn_stop(ESPState *s)
294 {
295     if (s->dma && !s->dma_enabled) {
296         s->dma_cb = handle_satn_stop;
297         return;
298     }
299     s->pdma_cb = satn_stop_pdma_cb;
300     s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
301     if (s->cmdlen) {
302         trace_esp_handle_satn_stop(s->cmdlen);
303         s->do_cmd = 1;
304         s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
305         s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
306         s->rregs[ESP_RSEQ] = SEQ_CD;
307         esp_raise_irq(s);
308     }
309 }
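
/*
 * SELATNS ("select with ATN and stop") only gathers the CDB: the bytes land
 * in s->cmdbuf with s->do_cmd set, either here via get_cmd() or through
 * subsequent FIFO writes (see the ESP_FIFO case in esp_reg_write()).  The
 * command is not dispatched until the guest issues a Transfer Information
 * command, at which point handle_ti()/esp_do_dma() call do_cmd().
 */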
310 
311 static void write_response_pdma_cb(ESPState *s)
312 {
313     s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
314     s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
315     s->rregs[ESP_RSEQ] = SEQ_CD;
316     esp_raise_irq(s);
317 }
318 
319 static void write_response(ESPState *s)
320 {
321     trace_esp_write_response(s->status);
322     s->ti_buf[0] = s->status;
323     s->ti_buf[1] = 0;
324     if (s->dma) {
325         if (s->dma_memory_write) {
326             s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
327             s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
328             s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
329             s->rregs[ESP_RSEQ] = SEQ_CD;
330         } else {
331             set_pdma(s, TI, 0, 2);
332             s->pdma_cb = write_response_pdma_cb;
333             esp_raise_drq(s);
334             return;
335         }
336     } else {
337         s->ti_size = 2;
338         s->ti_rptr = 0;
339         s->ti_wptr = 2;
340         s->rregs[ESP_RFLAGS] = 2;
341     }
342     esp_raise_irq(s);
343 }
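
/*
 * write_response() implements the data phase of the ICCS command: two bytes,
 * the SCSI status followed by a message byte of 0 (COMMAND COMPLETE), are
 * returned either through the DMA callback, through PDMA, or left in ti_buf
 * for the guest to read out of the FIFO register.
 */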
344 
345 static void esp_dma_done(ESPState *s)
346 {
347     s->rregs[ESP_RSTAT] |= STAT_TC;
348     s->rregs[ESP_RINTR] = INTR_BS;
349     s->rregs[ESP_RSEQ] = 0;
350     s->rregs[ESP_RFLAGS] = 0;
351     s->rregs[ESP_TCLO] = 0;
352     s->rregs[ESP_TCMID] = 0;
353     s->rregs[ESP_TCHI] = 0;
354     esp_raise_irq(s);
355 }
356 
357 static void do_dma_pdma_cb(ESPState *s)
358 {
359     int to_device = (s->ti_size < 0);
360     int len = s->pdma_cur - s->pdma_start;
361     if (s->do_cmd) {
362         s->ti_size = 0;
363         s->cmdlen = 0;
364         s->do_cmd = 0;
365         do_cmd(s, s->cmdbuf);
366         return;
367     }
368     s->dma_left -= len;
369     s->async_buf += len;
370     s->async_len -= len;
371     if (to_device) {
372         s->ti_size += len;
373     } else {
374         s->ti_size -= len;
375     }
376     if (s->async_len == 0) {
377         scsi_req_continue(s->current_req);
378         /*
379          * If there is still data to be read from the device then
380          * complete the DMA operation immediately.  Otherwise defer
381          * until the scsi layer has completed.
382          */
383         if (to_device || s->dma_left != 0 || s->ti_size == 0) {
384             return;
385         }
386     }
387 
388     /* Partially filled a scsi buffer. Complete immediately.  */
389     esp_dma_done(s);
390 }
391 
392 static void esp_do_dma(ESPState *s)
393 {
394     uint32_t len;
395     int to_device;
396 
397     len = s->dma_left;
398     if (s->do_cmd) {
399         /*
400  * handle_ti() case: when do_cmd is set, esp_do_dma() can only have been
401  * called from handle_ti() (esp_transfer_data() asserts !do_cmd)
402          */
403         trace_esp_do_dma(s->cmdlen, len);
404         assert(s->cmdlen <= sizeof(s->cmdbuf) &&
405                len <= sizeof(s->cmdbuf) - s->cmdlen);
406         if (s->dma_memory_read) {
407             s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
408         } else {
409             set_pdma(s, CMD, s->cmdlen, len);
410             s->pdma_cb = do_dma_pdma_cb;
411             esp_raise_drq(s);
412             return;
413         }
414         trace_esp_handle_ti_cmd(s->cmdlen);
415         s->ti_size = 0;
416         s->cmdlen = 0;
417         s->do_cmd = 0;
418         do_cmd(s, s->cmdbuf);
419         return;
420     }
421     if (s->async_len == 0) {
422         /* Defer until data is available.  */
423         return;
424     }
425     if (len > s->async_len) {
426         len = s->async_len;
427     }
428     to_device = (s->ti_size < 0);
429     if (to_device) {
430         if (s->dma_memory_read) {
431             s->dma_memory_read(s->dma_opaque, s->async_buf, len);
432         } else {
433             set_pdma(s, ASYNC, 0, len);
434             s->pdma_cb = do_dma_pdma_cb;
435             esp_raise_drq(s);
436             return;
437         }
438     } else {
439         if (s->dma_memory_write) {
440             s->dma_memory_write(s->dma_opaque, s->async_buf, len);
441         } else {
442             set_pdma(s, ASYNC, 0, len);
443             s->pdma_cb = do_dma_pdma_cb;
444             esp_raise_drq(s);
445             return;
446         }
447     }
448     s->dma_left -= len;
449     s->async_buf += len;
450     s->async_len -= len;
451     if (to_device) {
452         s->ti_size += len;
453     } else {
454         s->ti_size -= len;
455     }
456     if (s->async_len == 0) {
457         scsi_req_continue(s->current_req);
458         /*
459          * If there is still data to be read from the device then
460          * complete the DMA operation immediately.  Otherwise defer
461          * until the scsi layer has completed.
462          */
463         if (to_device || s->dma_left != 0 || s->ti_size == 0) {
464             return;
465         }
466     }
467 
468     /* Partially filled a scsi buffer. Complete immediately.  */
469     esp_dma_done(s);
470 }
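
/*
 * Worked example (data-in, board DMA callbacks present): for a 512-byte READ
 * the SCSI layer supplies async_buf/async_len = 512 via esp_transfer_data(),
 * and a TI command with a transfer count of 512 sets dma_left = 512.
 * esp_do_dma() then copies 512 bytes with dma_memory_write(), leaving
 * async_len, dma_left and ti_size all 0, so the completion interrupt is
 * deferred until the SCSI layer completes the request.  If instead the
 * programmed count runs out while ti_size is still non-zero, the transfer
 * finishes immediately through esp_dma_done().
 */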
471 
472 static void esp_report_command_complete(ESPState *s, uint32_t status)
473 {
474     trace_esp_command_complete();
475     if (s->ti_size != 0) {
476         trace_esp_command_complete_unexpected();
477     }
478     s->ti_size = 0;
479     s->dma_left = 0;
480     s->async_len = 0;
481     if (status) {
482         trace_esp_command_complete_fail();
483     }
484     s->status = status;
485     s->rregs[ESP_RSTAT] = STAT_ST;
486     esp_dma_done(s);
487     if (s->current_req) {
488         scsi_req_unref(s->current_req);
489         s->current_req = NULL;
490         s->current_dev = NULL;
491     }
492 }
493 
494 void esp_command_complete(SCSIRequest *req, size_t resid)
495 {
496     ESPState *s = req->hba_private;
497 
498     if (s->rregs[ESP_RSTAT] & STAT_INT) {
499         /*
500          * Defer handling command complete until the previous
501          * interrupt has been handled.
502          */
503         trace_esp_command_complete_deferred();
504         s->deferred_status = req->status;
505         s->deferred_complete = true;
506         return;
507     }
508     esp_report_command_complete(s, req->status);
509 }
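
/*
 * If the guest has not yet acknowledged the previous interrupt (STAT_INT is
 * still set), the status is parked in deferred_status/deferred_complete and
 * delivered from the ESP_RINTR read handler in esp_reg_read() once the
 * pending interrupt has been consumed.
 */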
510 
511 void esp_transfer_data(SCSIRequest *req, uint32_t len)
512 {
513     ESPState *s = req->hba_private;
514 
515     assert(!s->do_cmd);
516     trace_esp_transfer_data(s->dma_left, s->ti_size);
517     s->async_len = len;
518     s->async_buf = scsi_req_get_buf(req);
519     if (s->dma_left) {
520         esp_do_dma(s);
521     } else if (s->dma_counter != 0 && s->ti_size <= 0) {
522         /*
523          * If this was the last part of a DMA transfer then the
524          * completion interrupt is deferred to here.
525          */
526         esp_dma_done(s);
527     }
528 }
529 
530 static void handle_ti(ESPState *s)
531 {
532     uint32_t dmalen, minlen;
533 
534     if (s->dma && !s->dma_enabled) {
535         s->dma_cb = handle_ti;
536         return;
537     }
538 
539     dmalen = s->rregs[ESP_TCLO];
540     dmalen |= s->rregs[ESP_TCMID] << 8;
541     dmalen |= s->rregs[ESP_TCHI] << 16;
542     if (dmalen == 0) {
543         dmalen = 0x10000;
544     }
545     s->dma_counter = dmalen;
546 
547     if (s->do_cmd) {
548         minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
549     } else if (s->ti_size < 0) {
550         minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
551     } else {
552         minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
553     }
554     trace_esp_handle_ti(minlen);
555     if (s->dma) {
556         s->dma_left = minlen;
557         s->rregs[ESP_RSTAT] &= ~STAT_TC;
558         esp_do_dma(s);
559     } else if (s->do_cmd) {
560         trace_esp_handle_ti_cmd(s->cmdlen);
561         s->ti_size = 0;
562         s->cmdlen = 0;
563         s->do_cmd = 0;
564         do_cmd(s, s->cmdbuf);
565     }
566 }
567 
568 void esp_hard_reset(ESPState *s)
569 {
570     memset(s->rregs, 0, ESP_REGS);
571     memset(s->wregs, 0, ESP_REGS);
572     s->tchi_written = 0;
573     s->ti_size = 0;
574     s->ti_rptr = 0;
575     s->ti_wptr = 0;
576     s->dma = 0;
577     s->do_cmd = 0;
578     s->dma_cb = NULL;
579 
580     s->rregs[ESP_CFG1] = 7;
581 }
582 
583 static void esp_soft_reset(ESPState *s)
584 {
585     qemu_irq_lower(s->irq);
586     qemu_irq_lower(s->irq_data);
587     esp_hard_reset(s);
588 }
589 
590 static void parent_esp_reset(ESPState *s, int irq, int level)
591 {
592     if (level) {
593         esp_soft_reset(s);
594     }
595 }
596 
597 uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
598 {
599     uint32_t val;
600 
601     switch (saddr) {
602     case ESP_FIFO:
603         if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
604             /* Data-out phase: nothing valid to read from the FIFO.  */
605             qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
606             s->rregs[ESP_FIFO] = 0;
607         } else if (s->ti_rptr < s->ti_wptr) {
608             s->ti_size--;
609             s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
610         }
611         if (s->ti_rptr == s->ti_wptr) {
612             s->ti_rptr = 0;
613             s->ti_wptr = 0;
614         }
615         val = s->rregs[ESP_FIFO];
616         break;
617     case ESP_RINTR:
618         /*
619          * Reading RINTR returns and clears the interrupt register; the TC and
620          * INT status bits are cleared and the sequence step resets to SEQ_CD.
621          */
622         val = s->rregs[ESP_RINTR];
623         s->rregs[ESP_RINTR] = 0;
624         s->rregs[ESP_RSTAT] &= ~STAT_TC;
625         s->rregs[ESP_RSEQ] = SEQ_CD;
626         esp_lower_irq(s);
627         if (s->deferred_complete) {
628             esp_report_command_complete(s, s->deferred_status);
629             s->deferred_complete = false;
630         }
631         break;
632     case ESP_TCHI:
633         /* Return the unique id if the value has never been written */
634         if (!s->tchi_written) {
635             val = s->chip_id;
636         } else {
637             val = s->rregs[saddr];
638         }
639         break;
640     default:
641         val = s->rregs[saddr];
642         break;
643     }
644 
645     trace_esp_mem_readb(saddr, val);
646     return val;
647 }
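
/*
 * A sketch of a typical guest interrupt-service sequence against this model
 * (register indices as used by esp_reg_read(); a real driver goes through
 * its MMIO mapping and it_shift scaling):
 *
 *     status = esp_reg_read(s, ESP_RSTAT);   // phase bits, STAT_INT set
 *     seq    = esp_reg_read(s, ESP_RSEQ);
 *     intr   = esp_reg_read(s, ESP_RINTR);   // clears RINTR, lowers the IRQ
 *                                            // and flushes any deferred
 *                                            // command-complete status
 *
 * The order matters because of the RINTR read side effects above.
 */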
648 
649 void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
650 {
651     trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
652     switch (saddr) {
653     case ESP_TCHI:
654         s->tchi_written = true;
655         /* fall through */
656     case ESP_TCLO:
657     case ESP_TCMID:
658         s->rregs[ESP_RSTAT] &= ~STAT_TC;
659         break;
660     case ESP_FIFO:
661         if (s->do_cmd) {
662             if (s->cmdlen < ESP_CMDBUF_SZ) {
663                 s->cmdbuf[s->cmdlen++] = val & 0xff;
664             } else {
665                 trace_esp_error_fifo_overrun();
666             }
667         } else if (s->ti_wptr == TI_BUFSZ - 1) {
668             trace_esp_error_fifo_overrun();
669         } else {
670             s->ti_size++;
671             s->ti_buf[s->ti_wptr++] = val & 0xff;
672         }
673         break;
674     case ESP_CMD:
675         s->rregs[saddr] = val;
676         if (val & CMD_DMA) {
677             s->dma = 1;
678             /* Reload DMA counter.  */
679             s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
680             s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
681             s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
682         } else {
683             s->dma = 0;
684         }
685         switch (val & CMD_CMD) {
686         case CMD_NOP:
687             trace_esp_mem_writeb_cmd_nop(val);
688             break;
689         case CMD_FLUSH:
690             trace_esp_mem_writeb_cmd_flush(val);
691             /*s->ti_size = 0;*/
692             s->rregs[ESP_RINTR] = INTR_FC;
693             s->rregs[ESP_RSEQ] = 0;
694             s->rregs[ESP_RFLAGS] = 0;
695             break;
696         case CMD_RESET:
697             trace_esp_mem_writeb_cmd_reset(val);
698             esp_soft_reset(s);
699             break;
700         case CMD_BUSRESET:
701             trace_esp_mem_writeb_cmd_bus_reset(val);
702             s->rregs[ESP_RINTR] = INTR_RST;
703             if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
704                 esp_raise_irq(s);
705             }
706             break;
707         case CMD_TI:
708             trace_esp_mem_writeb_cmd_ti(val);
709             handle_ti(s);
710             break;
711         case CMD_ICCS:
712             trace_esp_mem_writeb_cmd_iccs(val);
713             write_response(s);
714             s->rregs[ESP_RINTR] = INTR_FC;
715             s->rregs[ESP_RSTAT] |= STAT_MI;
716             break;
717         case CMD_MSGACC:
718             trace_esp_mem_writeb_cmd_msgacc(val);
719             s->rregs[ESP_RINTR] = INTR_DC;
720             s->rregs[ESP_RSEQ] = 0;
721             s->rregs[ESP_RFLAGS] = 0;
722             esp_raise_irq(s);
723             break;
724         case CMD_PAD:
725             trace_esp_mem_writeb_cmd_pad(val);
726             s->rregs[ESP_RSTAT] = STAT_TC;
727             s->rregs[ESP_RINTR] = INTR_FC;
728             s->rregs[ESP_RSEQ] = 0;
729             break;
730         case CMD_SATN:
731             trace_esp_mem_writeb_cmd_satn(val);
732             break;
733         case CMD_RSTATN:
734             trace_esp_mem_writeb_cmd_rstatn(val);
735             break;
736         case CMD_SEL:
737             trace_esp_mem_writeb_cmd_sel(val);
738             handle_s_without_atn(s);
739             break;
740         case CMD_SELATN:
741             trace_esp_mem_writeb_cmd_selatn(val);
742             handle_satn(s);
743             break;
744         case CMD_SELATNS:
745             trace_esp_mem_writeb_cmd_selatns(val);
746             handle_satn_stop(s);
747             break;
748         case CMD_ENSEL:
749             trace_esp_mem_writeb_cmd_ensel(val);
750             s->rregs[ESP_RINTR] = 0;
751             break;
752         case CMD_DISSEL:
753             trace_esp_mem_writeb_cmd_dissel(val);
754             s->rregs[ESP_RINTR] = 0;
755             esp_raise_irq(s);
756             break;
757         default:
758             trace_esp_error_unhandled_command(val);
759             break;
760         }
761         break;
762     case ESP_WBUSID ... ESP_WSYNO:
763         break;
764     case ESP_CFG1:
765     case ESP_CFG2: case ESP_CFG3:
766     case ESP_RES3: case ESP_RES4:
767         s->rregs[saddr] = val;
768         break;
769     case ESP_WCCF ... ESP_WTEST:
770         break;
771     default:
772         trace_esp_error_invalid_write(val, saddr);
773         return;
774     }
775     s->wregs[saddr] = val;
776 }
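
/*
 * A minimal sketch of how a guest-visible DMA Transfer Information command
 * flows through esp_reg_write() (values as written by the guest; "len" is a
 * hypothetical 24-bit transfer length):
 *
 *     esp_reg_write(s, ESP_TCLO,  len & 0xff);
 *     esp_reg_write(s, ESP_TCMID, (len >> 8) & 0xff);
 *     esp_reg_write(s, ESP_TCHI,  (len >> 16) & 0xff);
 *     esp_reg_write(s, ESP_CMD,   CMD_DMA | CMD_TI);
 *
 * Writing ESP_CMD with CMD_DMA set copies the latched wregs counter back
 * into rregs and then dispatches CMD_TI to handle_ti(), which starts
 * esp_do_dma() with dma_left taken from that counter.
 */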
777 
778 static bool esp_mem_accepts(void *opaque, hwaddr addr,
779                             unsigned size, bool is_write,
780                             MemTxAttrs attrs)
781 {
782     return (size == 1) || (is_write && size == 4);
783 }
784 
785 static bool esp_pdma_needed(void *opaque)
786 {
787     ESPState *s = opaque;
788     return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
789            s->dma_enabled;
790 }
791 
792 static const VMStateDescription vmstate_esp_pdma = {
793     .name = "esp/pdma",
794     .version_id = 1,
795     .minimum_version_id = 1,
796     .needed = esp_pdma_needed,
797     .fields = (VMStateField[]) {
798         VMSTATE_BUFFER(pdma_buf, ESPState),
799         VMSTATE_INT32(pdma_origin, ESPState),
800         VMSTATE_UINT32(pdma_len, ESPState),
801         VMSTATE_UINT32(pdma_start, ESPState),
802         VMSTATE_UINT32(pdma_cur, ESPState),
803         VMSTATE_END_OF_LIST()
804     }
805 };
806 
807 static int esp_pre_save(void *opaque)
808 {
809     ESPState *s = ESP(opaque);
810 
811     s->mig_version_id = vmstate_esp.version_id;
812     return 0;
813 }
814 
815 static int esp_post_load(void *opaque, int version_id)
816 {
817     ESPState *s = ESP(opaque);
818 
819     s->mig_version_id = vmstate_esp.version_id;
820     return 0;
821 }
822 
823 const VMStateDescription vmstate_esp = {
824     .name = "esp",
825     .version_id = 5,
826     .minimum_version_id = 3,
827     .pre_save = esp_pre_save,
828     .post_load = esp_post_load,
829     .fields = (VMStateField[]) {
830         VMSTATE_BUFFER(rregs, ESPState),
831         VMSTATE_BUFFER(wregs, ESPState),
832         VMSTATE_INT32(ti_size, ESPState),
833         VMSTATE_UINT32(ti_rptr, ESPState),
834         VMSTATE_UINT32(ti_wptr, ESPState),
835         VMSTATE_BUFFER(ti_buf, ESPState),
836         VMSTATE_UINT32(status, ESPState),
837         VMSTATE_UINT32(deferred_status, ESPState),
838         VMSTATE_BOOL(deferred_complete, ESPState),
839         VMSTATE_UINT32(dma, ESPState),
840         VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
841         VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
842         VMSTATE_UINT32(cmdlen, ESPState),
843         VMSTATE_UINT32(do_cmd, ESPState),
844         VMSTATE_UINT32(dma_left, ESPState),
845         VMSTATE_END_OF_LIST()
846     },
847     .subsections = (const VMStateDescription * []) {
848         &vmstate_esp_pdma,
849         NULL
850     }
851 };
852 
853 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
854                                  uint64_t val, unsigned int size)
855 {
856     SysBusESPState *sysbus = opaque;
857     ESPState *s = ESP(&sysbus->esp);
858     uint32_t saddr;
859 
860     saddr = addr >> sysbus->it_shift;
861     esp_reg_write(s, saddr, val);
862 }
863 
864 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
865                                     unsigned int size)
866 {
867     SysBusESPState *sysbus = opaque;
868     ESPState *s = ESP(&sysbus->esp);
869     uint32_t saddr;
870 
871     saddr = addr >> sysbus->it_shift;
872     return esp_reg_read(s, saddr);
873 }
874 
875 static const MemoryRegionOps sysbus_esp_mem_ops = {
876     .read = sysbus_esp_mem_read,
877     .write = sysbus_esp_mem_write,
878     .endianness = DEVICE_NATIVE_ENDIAN,
879     .valid.accepts = esp_mem_accepts,
880 };
881 
882 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
883                                   uint64_t val, unsigned int size)
884 {
885     SysBusESPState *sysbus = opaque;
886     ESPState *s = ESP(&sysbus->esp);
887     uint32_t dmalen;
888     uint8_t *buf = get_pdma_buf(s);
889 
890     trace_esp_pdma_write(size);
891 
892     dmalen = s->rregs[ESP_TCLO];
893     dmalen |= s->rregs[ESP_TCMID] << 8;
894     dmalen |= s->rregs[ESP_TCHI] << 16;
895     if (dmalen == 0 || s->pdma_len == 0) {
896         return;
897     }
898     switch (size) {
899     case 1:
900         buf[s->pdma_cur++] = val;
901         s->pdma_len--;
902         dmalen--;
903         break;
904     case 2:
905         buf[s->pdma_cur++] = val >> 8;
906         buf[s->pdma_cur++] = val;
907         s->pdma_len -= 2;
908         dmalen -= 2;
909         break;
910     }
911     s->rregs[ESP_TCLO] = dmalen & 0xff;
912     s->rregs[ESP_TCMID] = dmalen >> 8;
913     s->rregs[ESP_TCHI] = dmalen >> 16;
914     if (s->pdma_len == 0 && s->pdma_cb) {
915         esp_lower_drq(s);
916         s->pdma_cb(s);
917         s->pdma_cb = NULL;
918     }
919 }
920 
921 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
922                                      unsigned int size)
923 {
924     SysBusESPState *sysbus = opaque;
925     ESPState *s = ESP(&sysbus->esp);
926     uint8_t *buf = get_pdma_buf(s);
927     uint64_t val = 0;
928 
929     trace_esp_pdma_read(size);
930 
931     if (s->pdma_len == 0) {
932         return 0;
933     }
934     switch (size) {
935     case 1:
936         val = buf[s->pdma_cur++];
937         s->pdma_len--;
938         break;
939     case 2:
940         val = buf[s->pdma_cur++];
941         val = (val << 8) | buf[s->pdma_cur++];
942         s->pdma_len -= 2;
943         break;
944     }
945 
946     if (s->pdma_len == 0 && s->pdma_cb) {
947         esp_lower_drq(s);
948         s->pdma_cb(s);
949         s->pdma_cb = NULL;
950     }
951     return val;
952 }
953 
954 static const MemoryRegionOps sysbus_esp_pdma_ops = {
955     .read = sysbus_esp_pdma_read,
956     .write = sysbus_esp_pdma_write,
957     .endianness = DEVICE_NATIVE_ENDIAN,
958     .valid.min_access_size = 1,
959     .valid.max_access_size = 2,
960 };
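
/*
 * The "esp-pdma" region implements the pseudo-DMA handshake used when no
 * board DMA callbacks are installed (e.g. the Macintosh Quadra style of
 * NCR53C96 hookup): the ESP raises DRQ via esp_raise_drq(), the guest or
 * board glue then performs 8/16-bit accesses on this region, stepping
 * pdma_cur through the buffer selected by pdma_origin (writes also count
 * the transfer registers down).  Once pdma_len reaches zero, DRQ is lowered
 * and the stored pdma_cb finishes the operation.
 */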
961 
962 static const struct SCSIBusInfo esp_scsi_info = {
963     .tcq = false,
964     .max_target = ESP_MAX_DEVS,
965     .max_lun = 7,
966 
967     .transfer_data = esp_transfer_data,
968     .complete = esp_command_complete,
969     .cancel = esp_request_cancelled
970 };
971 
972 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
973 {
974     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
975     ESPState *s = ESP(&sysbus->esp);
976 
977     switch (irq) {
978     case 0:
979         parent_esp_reset(s, irq, level);
980         break;
981     case 1:
982         esp_dma_enable(opaque, irq, level);
983         break;
984     }
985 }
986 
987 static void sysbus_esp_realize(DeviceState *dev, Error **errp)
988 {
989     SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
990     SysBusESPState *sysbus = SYSBUS_ESP(dev);
991     ESPState *s = ESP(&sysbus->esp);
992 
993     if (!qdev_realize(DEVICE(s), NULL, errp)) {
994         return;
995     }
996 
997     sysbus_init_irq(sbd, &s->irq);
998     sysbus_init_irq(sbd, &s->irq_data);
999     assert(sysbus->it_shift != -1);
1000 
1001     s->chip_id = TCHI_FAS100A;
1002     memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
1003                           sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
1004     sysbus_init_mmio(sbd, &sysbus->iomem);
1005     memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
1006                           sysbus, "esp-pdma", 2);
1007     sysbus_init_mmio(sbd, &sysbus->pdma);
1008 
1009     qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
1010 
1011     scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
1012 }
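
/*
 * A minimal sketch (board-specific names are hypothetical) of how a machine
 * might instantiate and wire up the sysbus ESP:
 *
 *     DeviceState *dev = qdev_new(TYPE_SYSBUS_ESP);
 *     SysBusESPState *sysbus = SYSBUS_ESP(dev);
 *     sysbus->it_shift = 2;                        // register stride 1 << 2
 *     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ESP_REG_BASE);   // "esp-regs"
 *     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, ESP_PDMA_BASE);  // "esp-pdma"
 *     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, esp_irq);     // STAT_INT
 *     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, esp_drq);     // PDMA DRQ
 *
 * ESP_REG_BASE, ESP_PDMA_BASE, esp_irq and esp_drq stand in for whatever the
 * board provides; it_shift must be configured by the board before realize
 * (see the assert above).
 */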
1013 
1014 static void sysbus_esp_hard_reset(DeviceState *dev)
1015 {
1016     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1017     ESPState *s = ESP(&sysbus->esp);
1018 
1019     esp_hard_reset(s);
1020 }
1021 
1022 static void sysbus_esp_init(Object *obj)
1023 {
1024     SysBusESPState *sysbus = SYSBUS_ESP(obj);
1025 
1026     object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
1027 }
1028 
1029 static const VMStateDescription vmstate_sysbus_esp_scsi = {
1030     .name = "sysbusespscsi",
1031     .version_id = 2,
1032     .minimum_version_id = 1,
1033     .fields = (VMStateField[]) {
1034         VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
1035         VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
1036         VMSTATE_END_OF_LIST()
1037     }
1038 };
1039 
1040 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
1041 {
1042     DeviceClass *dc = DEVICE_CLASS(klass);
1043 
1044     dc->realize = sysbus_esp_realize;
1045     dc->reset = sysbus_esp_hard_reset;
1046     dc->vmsd = &vmstate_sysbus_esp_scsi;
1047     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1048 }
1049 
1050 static const TypeInfo sysbus_esp_info = {
1051     .name          = TYPE_SYSBUS_ESP,
1052     .parent        = TYPE_SYS_BUS_DEVICE,
1053     .instance_init = sysbus_esp_init,
1054     .instance_size = sizeof(SysBusESPState),
1055     .class_init    = sysbus_esp_class_init,
1056 };
1057 
1058 static void esp_class_init(ObjectClass *klass, void *data)
1059 {
1060     DeviceClass *dc = DEVICE_CLASS(klass);
1061 
1062     /* internal device for sysbusesp/pciespscsi, not user-creatable */
1063     dc->user_creatable = false;
1064     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1065 }
1066 
1067 static const TypeInfo esp_info = {
1068     .name = TYPE_ESP,
1069     .parent = TYPE_DEVICE,
1070     .instance_size = sizeof(ESPState),
1071     .class_init = esp_class_init,
1072 };
1073 
1074 static void esp_register_types(void)
1075 {
1076     type_register_static(&sysbus_esp_info);
1077     type_register_static(&esp_info);
1078 }
1079 
1080 type_init(esp_register_types)
1081