/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is an NCR53C96.
 */
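
/*
 * IRQ/DRQ helpers: STAT_INT in the status register mirrors the state of
 * the IRQ line, so the helpers below keep register and wire in sync and
 * trace only genuine level changes. DRQ has no status bit and simply
 * follows the irq_data line.
 */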

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
{
    if (fifo8_num_used(fifo) == fifo->capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(fifo, val);
}

static uint8_t esp_fifo_pop(Fifo8 *fifo)
{
    if (fifo8_is_empty(fifo)) {
        return 0;
    }

    return fifo8_pop(fifo);
}

static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}

/* The transfer counter (TC) is 24 bits wide, split across 3 registers */
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

/* The guest-programmed start transfer count lives in the write registers */
static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    val = esp_fifo_pop(&s->fifo);
    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        esp_fifo_push(&s->cmdfifo, val);
    } else {
        esp_fifo_push(&s->fifo, val);
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}

static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
{
    s->pdma_cb = cb;
}
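
/*
 * Selection and command phase handling. esp_select() resolves the target
 * from the bus ID register; the CDB bytes are then accumulated into
 * cmdfifo, either via real DMA, via PDMA or byte-by-byte through the
 * FIFO register, before being handed to the SCSI layer by do_cmd().
 */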

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}

static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    return dmalen;
}

static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
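
/*
 * PDMA completion callbacks for the selection commands: these run once
 * the guest has pushed the outstanding bytes through the PDMA port, at
 * which point the transfer counter has reached zero and cmdfifo holds
 * the accumulated message and/or CDB bytes.
 */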

static void satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    esp_set_pdma_cb(s, SATN_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}

static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
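
/*
 * ICCS (Initiator Command Complete Sequence): transfer the status byte
 * followed by a zero message byte back to the initiator, via DMA, PDMA
 * or the FIFO as appropriate.
 */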

static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}

static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        /* Copy device data to FIFO */
        len = MIN(s->async_len, esp_get_tc(s));
        len = MIN(len, fifo8_num_free(&s->fifo));
        fifo8_push_all(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size -= len;
        esp_set_tc(s, esp_get_tc(s) - len);
    }
}
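
/*
 * Main DMA transfer loop. Three mechanisms are handled here: real DMA
 * via the dma_memory_read/dma_memory_write hooks, PDMA (the hooks are
 * NULL, so DRQ is raised and the guest CPU moves the bytes itself) and
 * the command phase, where bytes are routed into cmdfifo rather than
 * the SCSI layer's buffer.
 */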

static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti() case: esp_do_dma() is only reached with do_cmd set
         * from handle_ti(), since esp_transfer_data() asserts !do_cmd
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);

            esp_set_tc(s, esp_get_tc(s) - len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;

            if (s->async_len == 0) {
                scsi_req_continue(s->current_req);
                /*
                 * If there is still data to be read from the device then
                 * complete the DMA operation immediately. Otherwise defer
                 * until the scsi layer has completed.
                 */
                return;
            }

            /* Partially filled a scsi buffer. Complete immediately. */
            esp_dma_done(s);
            esp_lower_drq(s);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);

            esp_set_tc(s, esp_get_tc(s) - len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            if (s->async_len == 0) {
                scsi_req_continue(s->current_req);
                /*
                 * If there is still data to be read from the device then
                 * complete the DMA operation immediately. Otherwise defer
                 * until the scsi layer has completed.
                 */
                if (esp_get_tc(s) != 0 || s->ti_size == 0) {
                    return;
                }
            }

            /* Partially filled a scsi buffer. Complete immediately. */
            esp_dma_done(s);
            esp_lower_drq(s);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
        }
    }
}
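
/*
 * Non-DMA (programmed I/O) transfers: for data-out the bytes the guest
 * pushed into the FIFO are drained into the request buffer; for data-in
 * the FIFO is fed a byte at a time for the guest to read back through
 * the FIFO register.
 */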

static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

/* Dispatch the deferred PDMA callback recorded by esp_set_pdma_cb() */
static void esp_pdma_cb(ESPState *s)
{
    switch (s->pdma_cb) {
    case SATN_PDMA_CB:
        satn_pdma_cb(s);
        break;
    case S_WITHOUT_SATN_PDMA_CB:
        s_without_satn_pdma_cb(s);
        break;
    case SATN_STOP_PDMA_CB:
        satn_stop_pdma_cb(s);
        break;
    case WRITE_RESPONSE_PDMA_CB:
        write_response_pdma_cb(s);
        break;
    case DO_DMA_PDMA_CB:
        do_dma_pdma_cb(s);
        break;
    default:
        g_assert_not_reached();
    }
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] &= ~7;
        s->rregs[ESP_RSTAT] |= STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
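
/*
 * Callback from the SCSI layer: invoked whenever a new chunk of the
 * request's data buffer is ready (DATA IN) or may be filled (DATA OUT).
 */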

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter: a programmed STC of zero means 65536 bytes */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        s->rregs[ESP_RINTR] |= INTR_FC;
        s->rregs[ESP_RSTAT] |= STAT_MI;
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_FC;
        s->rregs[ESP_RSEQ] = 0;
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
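
/*
 * Register read/write interface. As a rough illustration (not lifted
 * from any particular guest driver), a DMA selection might be
 * programmed as:
 *
 *   esp_reg_write(s, ESP_WBUSID, target_id);         // destination ID
 *   esp_reg_write(s, ESP_TCLO, len & 0xff);          // transfer count
 *   esp_reg_write(s, ESP_TCMID, (len >> 8) & 0xff);
 *   esp_reg_write(s, ESP_CMD, CMD_SELATN | CMD_DMA); // select with ATN
 *   ...                                              // take the IRQ
 *   (void)esp_reg_read(s, ESP_RINTR);                // ack/clear it
 */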

uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
            (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
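
/*
 * Register writes: most registers simply latch the value into wregs at
 * the end of the function; the cases below handle the side effects
 * (clearing STAT_TC, feeding the FIFO/cmdfifo, or starting a command).
 */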

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

/*
 * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
 * guest CPU to perform the transfers between the SCSI bus and memory
 * itself. This is indicated by the dma_memory_read and dma_memory_write
 * functions being NULL (in contrast to the ESP PCI device) whilst
 * dma_enabled is still set.
 */

static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = ESP(opaque);

    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}

static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_esp_pdma,
        NULL
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_pdma_cb(s);
}
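
/*
 * PDMA reads drain the FIFO one or two bytes at a time; 16-bit accesses
 * are split high byte first, matching the write side above. The deferred
 * PDMA callback is only invoked once fewer than 2 bytes remain, i.e.
 * when another 16-bit access could no longer be satisfied from the FIFO.
 */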

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        esp_pdma_cb(s);
    }
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)