xref: /qemu/hw/scsi/esp.c (revision 0097d3ec17cf9e5a55dc33bee412a380f757ff23)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On Macintosh Quadra it is a NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
/* Assert the DREQ line: signal the external (P)DMA engine that data is ready. */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
}
67 
/* Deassert the DREQ line once the PDMA transfer has been consumed. */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
}
72 
73 void esp_dma_enable(ESPState *s, int irq, int level)
74 {
75     if (level) {
76         s->dma_enabled = 1;
77         trace_esp_dma_enable();
78         if (s->dma_cb) {
79             s->dma_cb(s);
80             s->dma_cb = NULL;
81         }
82     } else {
83         trace_esp_dma_disable();
84         s->dma_enabled = 0;
85     }
86 }
87 
88 void esp_request_cancelled(SCSIRequest *req)
89 {
90     ESPState *s = req->hba_private;
91 
92     if (req == s->current_req) {
93         scsi_req_unref(s->current_req);
94         s->current_req = NULL;
95         s->current_dev = NULL;
96     }
97 }
98 
/*
 * Arm a programmed-DMA (PDMA) transfer: record which internal buffer the
 * guest's PDMA accesses should target, the starting index within it, and
 * the number of bytes expected.  pdma_cur tracks progress from pdma_start.
 */
static void set_pdma(ESPState *s, enum pdma_origin_id origin,
                     uint32_t index, uint32_t len)
{
    s->pdma_origin = origin;
    s->pdma_start = index;
    s->pdma_cur = index;
    s->pdma_len = len;
}
107 
/*
 * Return the backing buffer for the currently armed PDMA transfer, as
 * selected by set_pdma().  NULL only if pdma_origin holds an unknown value.
 */
static uint8_t *get_pdma_buf(ESPState *s)
{
    switch (s->pdma_origin) {
    case PDMA:
        return s->pdma_buf;
    case TI:
        return s->ti_buf;
    case CMD:
        return s->cmdbuf;
    case ASYNC:
        return s->async_buf;
    }
    return NULL;
}
122 
/*
 * Common selection setup: reset the TI FIFO, cancel any in-flight request,
 * and look up the selected target device.
 *
 * Returns 0 on success, -1 (with a disconnect interrupt raised) if no
 * device answers at the target ID programmed in ESP_WBUSID.
 */
static int get_cmd_cb(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive: report disconnect to the guest driver.  */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }
    return 0;
}
150 
/*
 * Fetch the CDB for a selection command into buf.
 *
 * DMA path: read up to buflen bytes using the board DMA hook; if no hook
 * is installed, arm a PDMA transfer instead and return 0 — the command
 * completes later via s->pdma_cb.  Non-DMA path: the CDB was already
 * written into the TI FIFO by the guest.
 *
 * Returns the number of CDB bytes available now, or 0 if deferred/failed.
 */
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        /* Transfer count comes from the three TC registers.  */
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            /*
             * NOTE(review): this copies from the caller's buf (which is
             * uninitialized at this point) into pdma_buf — the direction
             * looks inverted; the guest later fills the buffer via PDMA
             * writes, so the copy may simply be dead.  Confirm intent.
             */
            memcpy(s->pdma_buf, buf, dmalen);
            set_pdma(s, PDMA, 0, dmalen);
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        /* First byte of the message holds the target ID in bits 7:5.  */
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    if (get_cmd_cb(s) < 0) {
        return 0;
    }
    return dmalen;
}
187 
/*
 * Dispatch a CDB (in buf) to the LUN encoded in busid's low 3 bits and
 * raise the function-complete interrupt.
 *
 * scsi_req_enqueue() returns the expected transfer length: positive for
 * device-to-host (data in), negative for host-to-device (data out), 0 for
 * no data phase.  The sign convention carries over into s->ti_size.
 */
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            /* Data-in phase: device will transfer to the initiator.  */
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            /* Data-out phase: initiator will transfer to the device.  */
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
215 
/*
 * Execute a command whose first byte is the IDENTIFY/bus-ID message and
 * whose remaining bytes are the CDB proper.
 */
static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}
222 
/*
 * PDMA completion for "select with ATN": once the guest has pushed the
 * command bytes through the PDMA port, run selection and dispatch the CDB.
 */
static void satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    /* Only dispatch if at least one byte was actually transferred.  */
    if (s->pdma_cur != s->pdma_start) {
        do_cmd(s, get_pdma_buf(s) + s->pdma_start);
    }
}
232 
/*
 * CMD_SELATN: select target with ATN and transfer the command.  If DMA is
 * requested but currently gated off, defer the whole operation until
 * esp_dma_enable() re-enables it.
 */
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        /* CDB fully available now; otherwise satn_pdma_cb finishes it.  */
        do_cmd(s, buf);
    }
}
248 
/*
 * PDMA completion for "select without ATN": no IDENTIFY message precedes
 * the CDB, so dispatch with bus ID 0 (LUN 0).
 */
static void s_without_satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
    }
}
258 
/*
 * CMD_SEL: select target without ATN — the buffer holds the bare CDB with
 * no IDENTIFY message, so LUN defaults to 0.  Deferred if DMA is gated off.
 */
static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}
274 
/*
 * PDMA completion for "select with ATN and stop": the CDB bytes now sit in
 * cmdbuf; record their length and stop at the command phase (do_cmd = 1)
 * so a later CMD_TI actually issues the command.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    s->cmdlen = s->pdma_cur - s->pdma_start;
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
290 
/*
 * CMD_SELATNS: select with ATN, transfer the message byte(s), then stop
 * before the command phase.  The fetched bytes stay in cmdbuf with
 * do_cmd set; handle_ti() completes the command later.  Deferred if DMA
 * is gated off.
 */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
308 
/*
 * PDMA completion for write_response(): the guest has drained the two
 * status/message bytes; report transfer complete and raise the interrupt.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
316 
/*
 * CMD_ICCS: deliver the status byte and a zero message byte to the
 * initiator, either via board DMA, via PDMA (deferred to
 * write_response_pdma_cb), or through the TI FIFO for PIO.
 */
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* No DMA hook: let the guest pull the 2 bytes through PDMA.  */
            set_pdma(s, TI, 0, 2);
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        /* PIO: expose the 2 bytes through the FIFO.  */
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
342 
/*
 * Signal DMA completion to the guest: set terminal count, clear the
 * transfer counter registers, and raise a bus-service interrupt.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}
354 
/*
 * PDMA completion for a data-phase transfer (mirrors the tail of
 * esp_do_dma()).  len is the number of bytes the guest actually moved
 * through the PDMA port.  If this was a command fetch (do_cmd), dispatch
 * the CDB instead of adjusting data-phase counters.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    /* Negative ti_size means a host-to-device (data-out) transfer.  */
    int to_device = (s->ti_size < 0);
    int len = s->pdma_cur - s->pdma_start;
    if (s->do_cmd) {
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    /* Account the bytes against DMA and SCSI-layer buffers.  */
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}
389 
/*
 * Perform (or arm) one DMA transfer step of up to dma_left bytes between
 * guest memory and the SCSI layer's async buffer.  With no board DMA hook
 * installed, a PDMA transfer is armed instead and the accounting happens
 * later in do_dma_pdma_cb().
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    len = s->dma_left;
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        } else {
            /* Defer: guest pushes the CDB tail through the PDMA port.  */
            set_pdma(s, CMD, s->cmdlen, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    /* Never move more than the SCSI layer buffer currently holds.  */
    if (len > s->async_len) {
        len = s->async_len;
    }
    /* Negative ti_size means a host-to-device (data-out) transfer.  */
    to_device = (s->ti_size < 0);
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    }
    /* Account the bytes just moved (same bookkeeping as do_dma_pdma_cb).  */
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}
469 
/*
 * Finish the active SCSI request: latch its status byte, move to the
 * status phase, raise the completion interrupt, and release the request.
 */
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        /* Command finished with residual transfer bytes outstanding.  */
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
491 
/*
 * SCSI layer callback: the request finished.  If the guest has not yet
 * acknowledged the previous interrupt (STAT_INT still set), stash the
 * status and report completion when RINTR is read instead.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /*
         * Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = req->status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, req->status);
}
508 
/*
 * SCSI layer callback: a fresh chunk of request data (len bytes) is
 * available in the request's buffer.  Resume any pending DMA, or deliver
 * the deferred completion interrupt if the DMA already finished.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
    }
}
527 
/*
 * CMD_TI (transfer information): start a data or command transfer of up to
 * the programmed transfer count.  Deferred if DMA is gated off.  In the
 * non-DMA case only a pending stopped-selection command (do_cmd) is acted
 * on; PIO data transfers go through the FIFO register instead.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    /* 24-bit transfer counter; 0 encodes the maximum of 64K.  */
    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    /* Clamp to what the current phase can actually move.  */
    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}
565 
/*
 * Full chip reset: clear all registers and transfer state.  Does not touch
 * the IRQ/DREQ output lines (see esp_soft_reset() for that).
 */
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    /* Reset value: chip bus ID 7 in the low bits of CFG1.  */
    s->rregs[ESP_CFG1] = 7;
}
580 
/* Reset triggered by CMD_RESET or the reset GPIO: drop both output lines,
 * then perform the full register reset. */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
587 
588 static void parent_esp_reset(ESPState *s, int irq, int level)
589 {
590     if (level) {
591         esp_soft_reset(s);
592     }
593 }
594 
/*
 * Read one ESP register.  FIFO reads pop a byte from the TI buffer; RINTR
 * reads clear the interrupt state (and may deliver a deferred command
 * completion); TCHI returns the chip ID until first written.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;

    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            /* Pop the next byte from the TI FIFO.  */
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            /* FIFO drained: rewind both pointers.  */
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            /* Deliver the completion that was held back by STAT_INT.  */
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        return old_val;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            return s->chip_id;
        }
        /* fall through */
    default:
        break;
    }
    return s->rregs[saddr];
}
640 
/*
 * Write one ESP register.  FIFO writes feed either the command buffer
 * (while do_cmd is set) or the TI buffer; ESP_CMD decodes and executes a
 * chip command.  The written value is mirrored into wregs[] at the end,
 * except for invalid addresses which return early.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing the transfer counter clears terminal count.  */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Selection stopped at command phase: bytes go to cmdbuf.  */
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /*s->ti_size = 0;*/
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            /* CFG1_RESREPT suppresses the reset-report interrupt.  */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Config registers read back what was written.  */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
769 
770 static bool esp_mem_accepts(void *opaque, hwaddr addr,
771                             unsigned size, bool is_write,
772                             MemTxAttrs attrs)
773 {
774     return (size == 1) || (is_write && size == 4);
775 }
776 
777 static bool esp_pdma_needed(void *opaque)
778 {
779     ESPState *s = opaque;
780     return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
781            s->dma_enabled;
782 }
783 
/* Migration subsection carrying the in-flight PDMA transfer state. */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(pdma_buf, ESPState),
        VMSTATE_INT32(pdma_origin, ESPState),
        VMSTATE_UINT32(pdma_len, ESPState),
        VMSTATE_UINT32(pdma_start, ESPState),
        VMSTATE_UINT32(pdma_cur, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
798 
/* Before saving: record the vmstate version we are emitting. */
static int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
806 
807 static int esp_post_load(void *opaque, int version_id)
808 {
809     ESPState *s = ESP(opaque);
810 
811     s->mig_version_id = vmstate_esp.version_id;
812     return 0;
813 }
814 
/* Main ESP migration state (version 5, loadable back to version 3). */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .pre_save = esp_pre_save,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        /* cmdbuf was migrated as two pieces historically; keep both.  */
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};
844 
845 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
846                                  uint64_t val, unsigned int size)
847 {
848     SysBusESPState *sysbus = opaque;
849     ESPState *s = ESP(&sysbus->esp);
850     uint32_t saddr;
851 
852     saddr = addr >> sysbus->it_shift;
853     esp_reg_write(s, saddr, val);
854 }
855 
856 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
857                                     unsigned int size)
858 {
859     SysBusESPState *sysbus = opaque;
860     ESPState *s = ESP(&sysbus->esp);
861     uint32_t saddr;
862 
863     saddr = addr >> sysbus->it_shift;
864     return esp_reg_read(s, saddr);
865 }
866 
/* Register window MMIO ops; access size rules enforced by esp_mem_accepts. */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
873 
/*
 * Guest write to the PDMA port: store 1 or 2 bytes into the armed PDMA
 * buffer, decrement the transfer counter registers, and fire the PDMA
 * completion callback once all expected bytes have arrived.
 */
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t dmalen;
    uint8_t *buf = get_pdma_buf(s);

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    /* Ignore writes when no transfer is outstanding.  */
    if (dmalen == 0 || s->pdma_len == 0) {
        return;
    }
    switch (size) {
    case 1:
        buf[s->pdma_cur++] = val;
        s->pdma_len--;
        dmalen--;
        break;
    case 2:
        /*
         * NOTE(review): a 2-byte access with only 1 byte left makes
         * pdma_len/dmalen underflow — presumably guests never do this;
         * confirm against the boards that use PDMA.
         */
        buf[s->pdma_cur++] = val >> 8;
        buf[s->pdma_cur++] = val;
        s->pdma_len -= 2;
        dmalen -= 2;
        break;
    }
    /* Write the decremented count back to the TC registers.  */
    s->rregs[ESP_TCLO] = dmalen & 0xff;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
}
910 
/*
 * Guest read from the PDMA port: fetch 1 or 2 bytes (big-endian pairing
 * for 16-bit reads) from the armed PDMA buffer, firing the completion
 * callback when the transfer is drained.
 */
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint8_t *buf = get_pdma_buf(s);
    uint64_t val = 0;

    if (s->pdma_len == 0) {
        return 0;
    }
    switch (size) {
    case 1:
        val = buf[s->pdma_cur++];
        s->pdma_len--;
        break;
    case 2:
        val = buf[s->pdma_cur++];
        val = (val << 8) | buf[s->pdma_cur++];
        s->pdma_len -= 2;
        break;
    }

    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
    return val;
}
941 
/* PDMA port MMIO ops: 8- or 16-bit accesses only. */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 2,
};
949 
/* SCSI bus callbacks wired into the generic SCSI layer. */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
959 
960 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
961 {
962     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
963     ESPState *s = ESP(&sysbus->esp);
964 
965     switch (irq) {
966     case 0:
967         parent_esp_reset(s, irq, level);
968         break;
969     case 1:
970         esp_dma_enable(opaque, irq, level);
971         break;
972     }
973 }
974 
/*
 * Realize the sysbus wrapper: realize the inner ESP device, export the
 * IRQ/DREQ lines, map the register window and the PDMA port, wire the two
 * GPIO inputs, and create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have configured the register spacing.  */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 2);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}
1001 
1002 static void sysbus_esp_hard_reset(DeviceState *dev)
1003 {
1004     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1005     ESPState *s = ESP(&sysbus->esp);
1006 
1007     esp_hard_reset(s);
1008 }
1009 
/* Instance init: embed the inner TYPE_ESP child object. */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
1016 
/* Migration wrapper: version byte (since v2) plus the embedded ESP state. */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1027 
/* Class init for the sysbus wrapper device. */
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1037 
/* QOM type: sysbus-attached ESP controller. */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1045 
/* Class init for the bare ESP core device. */
static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
1054 
/* QOM type: the ESP core shared by the sysbus and PCI front ends. */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
1061 
1062 static void esp_register_types(void)
1063 {
1064     type_register_static(&sysbus_esp_info);
1065     type_register_static(&esp_info);
1066 }
1067 
1068 type_init(esp_register_types)
1069