/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */
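
/*
 * Interrupt handling note: STAT_INT in ESP_RSTAT mirrors the state of the
 * IRQ line, latched by esp_raise_irq() and cleared again only when the
 * guest reads ESP_RINTR (see esp_reg_read() below). A minimal sketch of
 * the polling sequence a guest driver might use, assuming a memory-mapped
 * "regs" pointer (illustrative only, not part of this file):
 *
 *     while (!(regs[ESP_RSTAT] & STAT_INT)) {
 *         ;                          // wait for an interrupt to be latched
 *     }
 *     why = regs[ESP_RINTR];         // read-to-clear: drops INT and the IRQ
 */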

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
{
    if (fifo8_num_used(fifo) == fifo->capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(fifo, val);
}

static uint8_t esp_fifo_pop(Fifo8 *fifo)
{
    if (fifo8_is_empty(fifo)) {
        return 0;
    }

    return fifo8_pop(fifo);
}

static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n;

    if (maxlen == 0) {
        return 0;
    }

    buf = fifo8_pop_buf(fifo, maxlen, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    return n;
}

static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}
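
/*
 * The transfer counter (TC) is a 24-bit value spread across the TCLO,
 * TCMID and TCHI byte registers; esp_get_stc() reads the guest-written
 * (wregs) start value that is latched into the live counter when a DMA
 * command is issued. A worked example of the packing:
 *
 *     TCHI = 0x01, TCMID = 0x00, TCLO = 0x00  ->  TC = 0x010000 (65536)
 *
 * Note that esp_reg_write() below reloads a start value of 0 as 0x10000.
 */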

static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    if (s->do_cmd) {
        val = esp_fifo_pop(&s->cmdfifo);
    } else {
        val = esp_fifo_pop(&s->fifo);
    }

    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        esp_fifo_push(&s->cmdfifo, val);
    } else {
        esp_fifo_push(&s->fifo, val);
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}

static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
{
    s->pdma_cb = cb;
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}

static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            if (esp_select(s) < 0) {
                return -1;
            }
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        return -1;
    }
    return dmalen;
}

static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
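
/*
 * With ATN asserted, cmdfifo holds any MESSAGE OUT bytes followed by the
 * CDB, and cmdfifo_cdb_offset records where the CDB starts. A sketch of
 * the FIFO contents for a 6-byte TEST UNIT READY sent to LUN 2 (values
 * illustrative only):
 *
 *     cmdfifo: 0x82 0x00 0x00 0x00 0x00 0x00 0x00
 *              ^ IDENTIFY byte (0x80 | lun), cmdfifo_cdb_offset = 1
 *
 * do_message_phase() consumes the IDENTIFY byte (s->lun = message & 7)
 * before do_command_phase() hands the remaining bytes to the SCSI layer.
 */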

static void satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    esp_set_pdma_cb(s, SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}

static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
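
/*
 * The *_pdma_cb handlers are not called directly: a guest access to the
 * PDMA MMIO region moves bytes via esp_pdma_read()/esp_pdma_write() and
 * then esp_pdma_cb() dispatches on s->pdma_cb to continue the transfer.
 * The overall flow, as a sketch:
 *
 *     guest load/store on the "esp-pdma" region
 *         -> sysbus_esp_pdma_read()/sysbus_esp_pdma_write()
 *         -> esp_pdma_read()/esp_pdma_write()    // move bytes, adjust TC
 *         -> esp_pdma_cb()                       // dispatch s->pdma_cb
 *         -> e.g. do_dma_pdma_cb()               // advance the transfer
 */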

static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
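
/*
 * esp_do_dma() advances a DMA transfer by up to TC bytes. If the board
 * wired up real DMA helpers (dma_memory_read/dma_memory_write) the bytes
 * move directly between guest memory and the SCSI layer's async buffer;
 * otherwise the transfer falls back to PDMA, staging data in the FIFO
 * and raising DRQ so that the guest CPU performs the copy itself.
 */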

static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti() case: esp_do_dma() is entered with do_cmd set when
         * the bytes being transferred are command/message bytes destined
         * for cmdfifo rather than data for the current request
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}
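
/*
 * Non-DMA (programmed I/O) transfers move data through the FIFO under
 * guest control. For DATA IN only a single byte is staged at a time: the
 * guest acknowledges the bus service interrupt and reads ESP_FIFO, which
 * re-enters esp_do_nodma() via esp_reg_read() until ti_size reaches zero.
 * Roughly, per byte (an illustrative guest-side sequence, not part of
 * this file):
 *
 *     why = regs[ESP_RINTR];       // acknowledge INTR_BS
 *     buf[i] = regs[ESP_FIFO];     // pop the staged byte, stage the next
 */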

static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

static void esp_pdma_cb(ESPState *s)
{
    switch (s->pdma_cb) {
    case SATN_PDMA_CB:
        satn_pdma_cb(s);
        break;
    case S_WITHOUT_SATN_PDMA_CB:
        s_without_satn_pdma_cb(s);
        break;
    case SATN_STOP_PDMA_CB:
        satn_stop_pdma_cb(s);
        break;
    case WRITE_RESPONSE_PDMA_CB:
        write_response_pdma_cb(s);
        break;
    case DO_DMA_PDMA_CB:
        do_dma_pdma_cb(s);
        break;
    default:
        g_assert_not_reached();
    }
}
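
/*
 * ti_size tracks the bytes outstanding for the current transfer: it is
 * set from scsi_req_enqueue() (negative for DATA OUT, positive for DATA
 * IN) and converges on zero as data moves, so ti_size == 0 at completion
 * means everything was consumed and we can go straight to STATUS phase.
 */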

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}
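
/*
 * Register reads are not side-effect free: ESP_RINTR is read-to-clear
 * (dropping the IRQ line), ESP_FIFO pops a byte, and ESP_RFLAGS reports
 * the live FIFO depth in its bottom 5 bits. For example, after
 * write_response() queues the two status/message bytes, a read of
 * ESP_RFLAGS returns 2 and two ESP_FIFO reads drain them.
 */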

uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
            (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
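
/*
 * A typical selection sequence issued by a guest driver, as handled by
 * esp_reg_write() below (a sketch only, assuming a DMA-capable board):
 *
 *     regs[ESP_WBUSID] = target_id;             // destination bus ID
 *     regs[ESP_TCLO]   = len & 0xff;            // program start counter
 *     regs[ESP_TCMID]  = (len >> 8) & 0xff;
 *     regs[ESP_TCHI]   = (len >> 16) & 0xff;
 *     regs[ESP_CMD]    = CMD_SELATN | CMD_DMA;  // select with ATN
 *
 * The CMD_DMA flag latches the start counter into the live TC before the
 * command-specific handler (here handle_satn()) runs.
 */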

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            esp_bus_reset(s);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                          OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
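
/*
 * Migration compatibility: version 5 of vmstate_esp replaced the old
 * ti_buf/cmdbuf arrays and their read/write pointers with Fifo8 state, so
 * the esp_is_before_version_5() fields below exist only to accept streams
 * from older QEMUs and convert them in esp_post_load(); version 6 added
 * the lun field.
 */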

/*
 * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
 * guest CPU to perform the transfers between the SCSI bus and memory
 * itself. This is indicated by the dma_memory_read and dma_memory_write
 * functions being NULL (in contrast to the ESP PCI device) whilst
 * dma_enabled is still set.
 */

static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = ESP(opaque);

    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}

static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_esp_pdma,
        NULL
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
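
/*
 * The "esp-pdma" region accepts 8-bit and 16-bit accesses (see
 * sysbus_esp_pdma_ops below, impl.max_access_size = 2). A 16-bit store is
 * split high byte first into two byte pushes, so, for example, a 16-bit
 * write of 0xCAFE pushes 0xCA and then 0xFE; the read path reassembles
 * two popped bytes in the same order.
 */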

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_pdma_cb(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        esp_pdma_cb(s);
    }
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
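
/*
 * A board model would typically wire up the sysbus ESP along these lines
 * (a minimal sketch, assuming "base" and "irq" come from the board; the
 * it_shift field spaces the registers out on the bus and must be set
 * before realize, see the assert() in sysbus_esp_realize()):
 *
 *     SysBusESPState *sysbus = SYSBUS_ESP(qdev_new(TYPE_SYSBUS_ESP));
 *     sysbus->it_shift = 0;
 *     sysbus_realize_and_unref(SYS_BUS_DEVICE(sysbus), &error_fatal);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(sysbus), 0, base);
 *     sysbus_connect_irq(SYS_BUS_DEVICE(sysbus), 0, irq);
 */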

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)