xref: /qemu/hw/scsi/esp.c (revision e5455b8c1c6170c788f3c0fd577cc3be53539a99)
1 /*
2  * QEMU ESP/NCR53C9x emulation
3  *
4  * Copyright (c) 2005-2006 Fabrice Bellard
5  * Copyright (c) 2012 Herve Poussineau
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "migration/vmstate.h"
29 #include "hw/irq.h"
30 #include "hw/scsi/esp.h"
31 #include "trace.h"
32 #include "qemu/log.h"
33 #include "qemu/module.h"
34 
35 /*
36  * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
37  * also produced as NCR89C100. See
38  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
39  * and
40  * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
41  *
42  * On Macintosh Quadra it is a NCR53C96.
43  */
44 
45 static void esp_raise_irq(ESPState *s)
46 {
47     if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
48         s->rregs[ESP_RSTAT] |= STAT_INT;
49         qemu_irq_raise(s->irq);
50         trace_esp_raise_irq();
51     }
52 }
53 
54 static void esp_lower_irq(ESPState *s)
55 {
56     if (s->rregs[ESP_RSTAT] & STAT_INT) {
57         s->rregs[ESP_RSTAT] &= ~STAT_INT;
58         qemu_irq_lower(s->irq);
59         trace_esp_lower_irq();
60     }
61 }
62 
/* Assert the DMA request (DRQ) line used for PDMA transfers. */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}
68 
/* Deassert the DMA request (DRQ) line. */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}
74 
75 void esp_dma_enable(ESPState *s, int irq, int level)
76 {
77     if (level) {
78         s->dma_enabled = 1;
79         trace_esp_dma_enable();
80         if (s->dma_cb) {
81             s->dma_cb(s);
82             s->dma_cb = NULL;
83         }
84     } else {
85         trace_esp_dma_disable();
86         s->dma_enabled = 0;
87     }
88 }
89 
/*
 * SCSI layer callback: a request was cancelled.  Drop our reference
 * and forget the current request/device if it is the one in flight.
 */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
100 
101 static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
102 {
103     if (fifo8_num_used(fifo) == fifo->capacity) {
104         trace_esp_error_fifo_overrun();
105         return;
106     }
107 
108     fifo8_push(fifo, val);
109 }
110 static uint8_t esp_fifo_pop(ESPState *s)
111 {
112     if (fifo8_is_empty(&s->fifo)) {
113         return 0;
114     }
115 
116     return fifo8_pop(&s->fifo);
117 }
118 
119 static uint8_t esp_cmdfifo_pop(ESPState *s)
120 {
121     if (fifo8_is_empty(&s->cmdfifo)) {
122         return 0;
123     }
124 
125     return fifo8_pop(&s->cmdfifo);
126 }
127 
128 static uint32_t esp_get_tc(ESPState *s)
129 {
130     uint32_t dmalen;
131 
132     dmalen = s->rregs[ESP_TCLO];
133     dmalen |= s->rregs[ESP_TCMID] << 8;
134     dmalen |= s->rregs[ESP_TCHI] << 16;
135 
136     return dmalen;
137 }
138 
139 static void esp_set_tc(ESPState *s, uint32_t dmalen)
140 {
141     s->rregs[ESP_TCLO] = dmalen;
142     s->rregs[ESP_TCMID] = dmalen >> 8;
143     s->rregs[ESP_TCHI] = dmalen >> 16;
144 }
145 
146 static uint32_t esp_get_stc(ESPState *s)
147 {
148     uint32_t dmalen;
149 
150     dmalen = s->wregs[ESP_TCLO];
151     dmalen |= s->wregs[ESP_TCMID] << 8;
152     dmalen |= s->wregs[ESP_TCHI] << 16;
153 
154     return dmalen;
155 }
156 
157 static uint8_t esp_pdma_read(ESPState *s)
158 {
159     uint8_t val;
160 
161     if (s->do_cmd) {
162         val = esp_cmdfifo_pop(s);
163     } else {
164         val = esp_fifo_pop(s);
165     }
166 
167     return val;
168 }
169 
170 static void esp_pdma_write(ESPState *s, uint8_t val)
171 {
172     uint32_t dmalen = esp_get_tc(s);
173 
174     if (dmalen == 0) {
175         return;
176     }
177 
178     if (s->do_cmd) {
179         esp_fifo_push(&s->cmdfifo, val);
180     } else {
181         esp_fifo_push(&s->fifo, val);
182     }
183 
184     dmalen--;
185     esp_set_tc(s, dmalen);
186 }
187 
/*
 * Begin target selection for the ID programmed in the bus ID register.
 *
 * Returns 0 when the target exists, -1 (with a disconnect interrupt
 * already raised) when it does not.  Any request still in flight is
 * cancelled first, since the guest has started a new selection.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive: report a disconnect to the guest */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_busid_cmd() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
222 
/*
 * Gather up to @maxlen CDB bytes into cmdfifo, either via real DMA
 * (dma_memory_read), via PDMA (no memory-read callback: raise DRQ and
 * let the host push bytes), or from the data FIFO for non-DMA.
 *
 * Returns the number of bytes collected, 0 if none are available yet
 * (including the PDMA case, where bytes arrive later), or -1 (as
 * uint32_t) if selection failed.
 */
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            /* PDMA: select now, then wait for the host to feed bytes */
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        memcpy(buf, fifo8_pop_buf(&s->fifo, dmalen, &n), dmalen);
        if (dmalen >= 3) {
            /*
             * Replace the leading byte with the target LUN taken from
             * byte 2's top bits — NOTE(review): presumably mimics how
             * the guest lays out IDENTIFY + CDB; confirm against users
             */
            buf[0] = buf[2] >> 5;
        }
        fifo8_push_all(&s->cmdfifo, buf, dmalen);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}
266 
/*
 * Submit the CDB accumulated in cmdfifo to the SCSI layer for the LUN
 * encoded in @busid (low 3 bits).
 *
 * A non-zero datalen switches the bus phase: DATA IN defers the
 * completion interrupt until the first transfer is ready (see
 * esp_transfer_data()); DATA OUT raises it immediately.
 */
static void do_busid_cmd(ESPState *s, uint8_t busid)
{
    uint32_t n, cmdlen;
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;
    uint8_t *buf;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    cmdlen = fifo8_num_used(&s->cmdfifo);
    buf = (uint8_t *)fifo8_pop_buf(&s->cmdfifo, cmdlen, &n);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    /* datalen > 0: device-to-host; datalen < 0: host-to-device */
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
307 
/*
 * Consume the message-out prefix from cmdfifo and dispatch the CDB.
 * The first byte is the IDENTIFY/bus-ID message; any further bytes up
 * to cmdfifo_cdb_offset are extended messages, which are discarded.
 */
static void do_cmd(ESPState *s)
{
    uint8_t busid = fifo8_pop(&s->cmdfifo);
    uint32_t n;

    s->cmdfifo_cdb_offset--;

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        fifo8_pop_buf(&s->cmdfifo, s->cmdfifo_cdb_offset, &n);
        s->cmdfifo_cdb_offset = 0;
    }

    do_busid_cmd(s, busid);
}
323 
324 static void satn_pdma_cb(ESPState *s)
325 {
326     s->do_cmd = 0;
327     if (!fifo8_is_empty(&s->cmdfifo)) {
328         s->cmdfifo_cdb_offset = 1;
329         do_cmd(s);
330     }
331 }
332 
/*
 * SELECT WITH ATN command: collect the IDENTIFY message plus CDB and
 * execute it.  Deferred (via dma_cb) while DMA is disabled; falls back
 * to waiting in command phase if the target exists but no CDB bytes
 * have arrived yet.
 */
static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        /* Retry once the DMA engine is enabled */
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* One message byte (IDENTIFY) precedes the CDB */
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
353 
354 static void s_without_satn_pdma_cb(ESPState *s)
355 {
356     uint32_t len;
357 
358     s->do_cmd = 0;
359     len = fifo8_num_used(&s->cmdfifo);
360     if (len) {
361         s->cmdfifo_cdb_offset = 0;
362         do_busid_cmd(s, 0);
363     }
364 }
365 
/*
 * SELECT WITHOUT ATN command: like handle_satn() but the CDB arrives
 * with no leading IDENTIFY message, so the dispatch offset is 0 and
 * LUN 0 is implied.
 */
static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        /* Retry once the DMA engine is enabled */
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
386 
/*
 * PDMA completion callback for SELECT WITH ATN AND STOP: the sequence
 * stops after the message byte, so report command phase and raise the
 * interrupt instead of dispatching; do_cmd stays set for the follow-up
 * transfer-information command.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
400 
/*
 * SELECT WITH ATN AND STOP command: transfer only the single message
 * byte (get_cmd maxlen of 1), then stop in message-out phase so the
 * guest can inspect/extend the message before sending the CDB.
 */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        /* Retry once the DMA engine is enabled */
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}
426 
/*
 * PDMA completion callback for write_response(): once the host has
 * read the status/message pair, report status phase complete.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
434 
/*
 * ICCS (Initiator Command Complete Steps): deliver the two-byte
 * status + message (0 = COMMAND COMPLETE) pair to the host, via real
 * DMA, PDMA (deferred through write_response_pdma_cb), or the FIFO
 * for non-DMA.
 */
static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* PDMA: host will pull the bytes; finish in the callback */
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
462 
/*
 * Signal DMA transfer completion to the guest: set terminal count,
 * clear the counter and FIFO count, and raise a bus-service interrupt.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
472 
/*
 * PDMA data-phase callback, run after the host has pushed bytes into
 * (DATA OUT) or pulled bytes from (DATA IN) the FIFO.
 *
 * DATA OUT: drain the FIFO into the SCSI async buffer; hand the buffer
 * to the SCSI layer when full, or finish the DMA when TC hits zero.
 * DATA IN: refill the FIFO from the async buffer while TC remains,
 * deferring to the SCSI layer when the buffer is exhausted.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* The bytes were CDB bytes: dispatch the command instead */
        s->ti_size = 0;
        s->do_cmd = 0;
        do_cmd(s);
        esp_lower_drq(s);
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            /* SCSI buffer full: let the SCSI layer consume it */
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately.  */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
550 
/*
 * Execute one step of a DMA Transfer Information command.
 *
 * Three cases:
 *  - do_cmd set: bytes are CDB/message bytes destined for cmdfifo
 *    (real DMA reads them now; PDMA defers to do_dma_pdma_cb);
 *  - DATA OUT (to_device): move guest memory to the SCSI async buffer;
 *  - DATA IN: move the async buffer to guest memory, or to the FIFO
 *    for PDMA, padding short transfers for the MacOS toolbox quirk.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            /* PDMA: host pushes the bytes; continue in the callback */
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }
    /* Clamp to what the SCSI layer's buffer can take/provide */
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
    esp_lower_drq(s);
}
675 
/*
 * Execute one step of a non-DMA Transfer Information command, moving
 * bytes between the 16-byte FIFO and the SCSI async buffer.  Mirrors
 * the do_cmd handling of esp_do_dma() for CDB/message bytes.
 */
static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen, n;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available.  */
        return;
    }

    if (to_device) {
        /* Drain the FIFO into the SCSI buffer */
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        /* Fill the FIFO from the SCSI buffer */
        len = MIN(s->ti_size, s->async_len);
        len = MIN(len, fifo8_num_free(&s->fifo));
        fifo8_push_all(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size -= len;
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);

        /* Defer the completion interrupt for remaining DATA IN bytes */
        if (to_device || s->ti_size == 0) {
            return;
        }
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
744 
/*
 * SCSI layer callback: the current request has finished.  Latch the
 * SCSI status for the subsequent ICCS, switch to status phase, raise
 * the completion interrupt and release the request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        /* Guest didn't transfer all the expected bytes */
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    esp_lower_drq(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
768 
/*
 * SCSI layer callback: @len bytes of data are ready in the request's
 * buffer (DATA IN) or wanted by it (DATA OUT).
 *
 * On the first DATA IN chunk the deferred command-complete interrupt
 * from do_busid_cmd() is raised here; otherwise the transfer continues
 * in whichever mode (DMA/non-DMA) the last TI command requested.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);

        /*
         * If data is ready to transfer and the TI command has already
         * been executed, start DMA immediately. Otherwise DMA will start
         * when host sends the TI command
         */
        if (s->ti_size && (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA))) {
            esp_do_dma(s);
        }
        return;
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd & CMD_DMA) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else {
        esp_do_nodma(s);
    }
}
827 
/*
 * Transfer Information command: start (or defer, while DMA is
 * disabled) a data/command transfer in DMA or non-DMA mode.  The
 * issuing command byte is remembered in ti_cmd so deferred transfers
 * keep the correct mode (see esp_transfer_data()).
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}
848 
849 void esp_hard_reset(ESPState *s)
850 {
851     memset(s->rregs, 0, ESP_REGS);
852     memset(s->wregs, 0, ESP_REGS);
853     s->tchi_written = 0;
854     s->ti_size = 0;
855     fifo8_reset(&s->fifo);
856     fifo8_reset(&s->cmdfifo);
857     s->dma = 0;
858     s->do_cmd = 0;
859     s->dma_cb = NULL;
860 
861     s->rregs[ESP_CFG1] = 7;
862 }
863 
/* Soft reset: additionally drop both output lines before resetting. */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
870 
871 static void parent_esp_reset(ESPState *s, int irq, int level)
872 {
873     if (level) {
874         esp_soft_reset(s);
875     }
876 }
877 
/*
 * Guest read of register @saddr.  Most registers read back their
 * stored value; FIFO, RINTR, TCHI and RFLAGS have side effects or
 * synthesised contents as noted below.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
                (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            /* Reading the FIFO register pops one byte */
            s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
     case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
925 
/*
 * Guest write to register @saddr.  Transfer-count writes clear TC
 * status; FIFO writes feed the active FIFO; command-register writes
 * reload the DMA counter and dispatch the command.  The value is also
 * latched into wregs at the end for all accepted addresses.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);
        } else {
            esp_fifo_push(&s->fifo, val);
        }

        /* Non-DMA transfers raise an interrupt after every byte */
        if (s->rregs[ESP_CMD] == CMD_TI) {
            s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS;
            esp_raise_irq(s);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            if (esp_get_stc(s) == 0) {
                /* A start count of 0 means the maximum: 64 KiB */
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            /* CFG1_RESREPT suppresses the reset interrupt */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
1052 
1053 static bool esp_mem_accepts(void *opaque, hwaddr addr,
1054                             unsigned size, bool is_write,
1055                             MemTxAttrs attrs)
1056 {
1057     return (size == 1) || (is_write && size == 4);
1058 }
1059 
1060 static bool esp_is_before_version_5(void *opaque, int version_id)
1061 {
1062     ESPState *s = ESP(opaque);
1063 
1064     version_id = MIN(version_id, s->mig_version_id);
1065     return version_id < 5;
1066 }
1067 
1068 static bool esp_is_version_5(void *opaque, int version_id)
1069 {
1070     ESPState *s = ESP(opaque);
1071 
1072     version_id = MIN(version_id, s->mig_version_id);
1073     return version_id == 5;
1074 }
1075 
1076 int esp_pre_save(void *opaque)
1077 {
1078     ESPState *s = ESP(object_resolve_path_component(
1079                       OBJECT(opaque), "esp"));
1080 
1081     s->mig_version_id = vmstate_esp.version_id;
1082     return 0;
1083 }
1084 
1085 static int esp_post_load(void *opaque, int version_id)
1086 {
1087     ESPState *s = ESP(opaque);
1088     int len, i;
1089 
1090     version_id = MIN(version_id, s->mig_version_id);
1091 
1092     if (version_id < 5) {
1093         esp_set_tc(s, s->mig_dma_left);
1094 
1095         /* Migrate ti_buf to fifo */
1096         len = s->mig_ti_wptr - s->mig_ti_rptr;
1097         for (i = 0; i < len; i++) {
1098             fifo8_push(&s->fifo, s->mig_ti_buf[i]);
1099         }
1100 
1101         /* Migrate cmdbuf to cmdfifo */
1102         for (i = 0; i < s->mig_cmdlen; i++) {
1103             fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
1104         }
1105     }
1106 
1107     s->mig_version_id = vmstate_esp.version_id;
1108     return 0;
1109 }
1110 
/*
 * Migration description for the ESP core state.
 *
 * Stream version 5 replaced the flat ti_buf/cmdbuf arrays and their
 * read/write pointers with Fifo8 objects: the mig_* fields only travel
 * in pre-v5 streams (esp_is_before_version_5), while fifo/cmdfifo and
 * companions only travel in v5 streams (esp_is_version_5).
 * esp_post_load() converts legacy fields into the FIFOs after load.
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /*
         * mig_cmdbuf is split: the first 16 bytes migrate from stream
         * version 0, the remainder only from version 4 onwards.
         */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_END_OF_LIST()
    },
};
1145 
1146 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
1147                                  uint64_t val, unsigned int size)
1148 {
1149     SysBusESPState *sysbus = opaque;
1150     ESPState *s = ESP(&sysbus->esp);
1151     uint32_t saddr;
1152 
1153     saddr = addr >> sysbus->it_shift;
1154     esp_reg_write(s, saddr, val);
1155 }
1156 
1157 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
1158                                     unsigned int size)
1159 {
1160     SysBusESPState *sysbus = opaque;
1161     ESPState *s = ESP(&sysbus->esp);
1162     uint32_t saddr;
1163 
1164     saddr = addr >> sysbus->it_shift;
1165     return esp_reg_read(s, saddr);
1166 }
1167 
/* Register bank MMIO ops; esp_mem_accepts() restricts the allowed sizes */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
1174 
1175 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
1176                                   uint64_t val, unsigned int size)
1177 {
1178     SysBusESPState *sysbus = opaque;
1179     ESPState *s = ESP(&sysbus->esp);
1180     uint32_t dmalen;
1181 
1182     trace_esp_pdma_write(size);
1183 
1184     switch (size) {
1185     case 1:
1186         esp_pdma_write(s, val);
1187         break;
1188     case 2:
1189         esp_pdma_write(s, val >> 8);
1190         esp_pdma_write(s, val);
1191         break;
1192     }
1193     dmalen = esp_get_tc(s);
1194     if (dmalen == 0 || fifo8_num_free(&s->fifo) < 2) {
1195         s->pdma_cb(s);
1196     }
1197 }
1198 
1199 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
1200                                      unsigned int size)
1201 {
1202     SysBusESPState *sysbus = opaque;
1203     ESPState *s = ESP(&sysbus->esp);
1204     uint64_t val = 0;
1205 
1206     trace_esp_pdma_read(size);
1207 
1208     switch (size) {
1209     case 1:
1210         val = esp_pdma_read(s);
1211         break;
1212     case 2:
1213         val = esp_pdma_read(s);
1214         val = (val << 8) | esp_pdma_read(s);
1215         break;
1216     }
1217     if (fifo8_num_used(&s->fifo) < 2) {
1218         s->pdma_cb(s);
1219     }
1220     return val;
1221 }
1222 
/*
 * PDMA data port ops.  Guest accesses of 1-4 bytes are valid, but .impl
 * caps the handlers at 2 bytes, so the memory core breaks a 32-bit
 * access into multiple 16-bit handler calls.
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
1232 
/* Callbacks the SCSI layer uses to hand data and completions to the ESP */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
1242 
1243 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
1244 {
1245     SysBusESPState *sysbus = SYSBUS_ESP(opaque);
1246     ESPState *s = ESP(&sysbus->esp);
1247 
1248     switch (irq) {
1249     case 0:
1250         parent_esp_reset(s, irq, level);
1251         break;
1252     case 1:
1253         esp_dma_enable(opaque, irq, level);
1254         break;
1255     }
1256 }
1257 
/*
 * Realize the sysbus wrapper: realize the embedded ESP core, then expose
 * its IRQ lines, register bank and PDMA port on the system bus and
 * create the SCSI bus the core drives.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    /* Realize the internal ESP device first; propagate any error */
    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    /* Export the two IRQ lines (index 0: s->irq, index 1: s->irq_data) */
    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have set it_shift before realizing the device */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* MMIO region 0: the ESP registers, spaced 1 << it_shift bytes apart */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    /* MMIO region 1: the 4-byte programmed-DMA data port */
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    /* GPIO 0: reset, GPIO 1: DMA enable (see sysbus_esp_gpio_demux) */
    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}
1284 
1285 static void sysbus_esp_hard_reset(DeviceState *dev)
1286 {
1287     SysBusESPState *sysbus = SYSBUS_ESP(dev);
1288     ESPState *s = ESP(&sysbus->esp);
1289 
1290     esp_hard_reset(s);
1291 }
1292 
1293 static void sysbus_esp_init(Object *obj)
1294 {
1295     SysBusESPState *sysbus = SYSBUS_ESP(obj);
1296 
1297     object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
1298 }
1299 
/*
 * Migration state for the sysbus wrapper.  Version 2 added
 * esp.mig_version_id, transferred ahead of the embedded ESP struct so
 * the version tests in vmstate_esp can consult it while loading.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
1311 
1312 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
1313 {
1314     DeviceClass *dc = DEVICE_CLASS(klass);
1315 
1316     dc->realize = sysbus_esp_realize;
1317     dc->reset = sysbus_esp_hard_reset;
1318     dc->vmsd = &vmstate_sysbus_esp_scsi;
1319     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1320 }
1321 
/* QOM type: the ESP core wrapped as a sysbus device */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_SYSBUS_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1329 
1330 static void esp_finalize(Object *obj)
1331 {
1332     ESPState *s = ESP(obj);
1333 
1334     fifo8_destroy(&s->fifo);
1335     fifo8_destroy(&s->cmdfifo);
1336 }
1337 
1338 static void esp_init(Object *obj)
1339 {
1340     ESPState *s = ESP(obj);
1341 
1342     fifo8_create(&s->fifo, ESP_FIFO_SZ);
1343     fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
1344 }
1345 
1346 static void esp_class_init(ObjectClass *klass, void *data)
1347 {
1348     DeviceClass *dc = DEVICE_CLASS(klass);
1349 
1350     /* internal device for sysbusesp/pciespscsi, not user-creatable */
1351     dc->user_creatable = false;
1352     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1353 }
1354 
/* QOM type: the bare ESP core (internal-only, see esp_class_init) */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
1363 
1364 static void esp_register_types(void)
1365 {
1366     type_register_static(&sysbus_esp_info);
1367     type_register_static(&esp_info);
1368 }
1369 
1370 type_init(esp_register_types)
1371