/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 * Copyright (c) 2023 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */
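/*
 * Note that the chip's register file is not symmetric: many addresses
 * decode differently for reads and writes (compare the ESP_R* and ESP_W*
 * register names), which is why the emulation keeps separate rregs[]
 * (read) and wregs[] (write) banks indexed by the same offsets.
 */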
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    if (!(s->drq_state)) {
        qemu_irq_raise(s->drq_irq);
        trace_esp_raise_drq();
        s->drq_state = true;
    }
}

static void esp_lower_drq(ESPState *s)
{
    if (s->drq_state) {
        qemu_irq_lower(s->drq_irq);
        trace_esp_lower_drq();
        s->drq_state = false;
    }
}

static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};

static void esp_set_phase(ESPState *s, uint8_t phase)
{
    s->rregs[ESP_RSTAT] &= ~7;
    s->rregs[ESP_RSTAT] |= phase;

    trace_esp_set_phase(esp_phase_names[phase]);
}

static uint8_t esp_get_phase(ESPState *s)
{
    return s->rregs[ESP_RSTAT] & 7;
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(&s->fifo, val);
}

static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
{
    fifo8_push_all(&s->fifo, buf, len);
}

static uint8_t esp_fifo_pop(ESPState *s)
{
    if (fifo8_is_empty(&s->fifo)) {
        return 0;
    }

    return fifo8_pop(&s->fifo);
}

static uint32_t esp_fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}

static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    return esp_fifo8_pop_buf(&s->fifo, dest, maxlen);
}

static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}
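/*
 * The transfer counter (TC) is a 24-bit value split across three 8-bit
 * registers: for example TCHI=0x01, TCMID=0x23, TCLO=0x45 (illustrative
 * values) corresponds to a count of 0x012345 bytes. The guest programs the
 * start count via the wregs bank (see esp_get_stc() above) and reads the
 * live counter back from rregs; esp_set_tc() latches STAT_TC once the
 * counter decrements to zero.
 *
 * esp_pdma_read()/esp_pdma_write() below implement "programmed DMA" as
 * used by e.g. the Macintosh Quadra machines: the CPU moves the data
 * itself through a separate PDMA MMIO region while the chip sequences a
 * DMA transfer.
 */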
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    val = esp_fifo_pop(s);
    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    esp_fifo_push(s, val);

    dmalen--;
    esp_set_tc(s, dmalen);
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}

static void esp_do_dma(ESPState *s);
static void esp_do_nodma(ESPState *s);

static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}
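/*
 * Layout of cmdfifo during selection (as built up by the code above and
 * below): any MESSAGE OUT bytes, typically a single IDENTIFY byte carrying
 * the LUN, come first, followed by the CDB itself. cmdfifo_cdb_offset
 * records how many message bytes precede the CDB so that
 * do_message_phase() can consume them before do_command_phase() dispatches
 * the command.
 */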
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo8_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}

static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_pad(ESPState *s)
{
    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_buf(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}
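/*
 * A DMA transfer is considered finished once TC has counted down to zero
 * and the FIFO holds fewer than 2 bytes. The "< 2" (rather than "empty")
 * is deliberate, if slightly subtle: the PDMA interface allows 16-bit
 * accesses, so a final odd byte may legitimately remain in the FIFO for
 * the guest to collect.
 */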
static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        esp_lower_drq(s);
    }
}

static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            esp_raise_drq(s);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_raise_drq(s);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;
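    /*
     * For the two DATA phases below, async_buf/async_len describe the
     * buffer provided by the QEMU SCSI layer (via scsi_req_get_buf() in
     * esp_transfer_data()): DATA OUT drains guest memory or the FIFO into
     * async_buf, while DATA IN fills guest memory or the FIFO from it.
     */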
    case STAT_DO:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
                esp_raise_drq(s);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
                esp_raise_drq(s);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
                esp_lower_drq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}
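/*
 * Everything below handles the non-DMA case: the guest moves data strictly
 * through the FIFO, one TI command at a time, so each step works on at
 * most ESP_FIFO_SZ bytes and raises INTR_BS to ask for the next chunk.
 */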
static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;
        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contains entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available. */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}
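/*
 * Called by the QEMU SCSI layer (via esp_scsi_info.complete below) once a
 * request has finished: latch the SCSI status byte and switch the bus to
 * STATUS phase so the guest can collect it with an ICCS command.
 */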
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    /* Ensure DRQ is set correctly for TC underflow or normal completion */
    esp_dma_ti_check(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
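/*
 * Called by the SCSI layer (via esp_scsi_info.transfer_data) when request
 * data is ready. The first invocation for a request only records the
 * buffer and raises the interrupt deferred by esp_select(); the actual
 * transfer is driven by the TI command currently in ESP_CMD, if any.
 */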
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS;
            s->rregs[ESP_RSEQ] = SEQ_MO;
            break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter. */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
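/*
 * Register reads below are not side-effect free: reading ESP_FIFO pops a
 * byte from the FIFO, and reading ESP_RINTR clears the interrupt register
 * and most of the status bits.
 */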
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}
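/*
 * Migration versioning: mig_version_id records the vmstate version used by
 * the migration source (saved in esp_pre_save() below), so the helpers
 * that follow can gate each field on the version actually present in the
 * stream rather than on this binary's own vmstate_esp.version_id.
 */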
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5 && version_id <= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                          OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
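/*
 * Rough version history, as implied by the field tests below: versions
 * before 5 carried the legacy mig_* buffers (converted in esp_post_load()
 * above), version 5 moved to the Fifo8-based fifo/cmdfifo state, version 6
 * added the lun field, and version 7 drops mig_ti_cmd again and adds
 * drq_state.
 */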
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_do_dma(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    esp_do_dma(s);
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}
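/*
 * The sysbus wrapper exposes two MMIO regions: the register bank, with
 * each 8-bit register spaced at (index << it_shift) to match the board's
 * bus wiring, and the 4-byte PDMA region used for programmed DMA accesses.
 */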
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info_types[] = {
    {
        .name = TYPE_SYSBUS_ESP,
        .parent = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)