/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */
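
/*
 * Bus signal helpers: STAT_INT in the status register mirrors the level of
 * the IRQ line, while DRQ is a separate line (irq_data) used to handshake
 * pseudo-DMA (PDMA) transfers with the host
 */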
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}
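
/*
 * Guarded FIFO accessors: pushing to a full FIFO is dropped (and traced as
 * an overrun) and popping an empty FIFO returns 0, so that bad guest
 * accesses cannot corrupt the emulated FIFO state
 */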
static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
{
    if (fifo8_num_used(fifo) == fifo->capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(fifo, val);
}

static uint8_t esp_fifo_pop(Fifo8 *fifo)
{
    if (fifo8_is_empty(fifo)) {
        return 0;
    }

    return fifo8_pop(fifo);
}

static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}

/* The transfer counter (TC) is a 24-bit value split across three registers */
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

/*
 * Update the current transfer counter; a transition from non-zero to zero
 * sets the "transfer count zero" (STAT_TC) flag in the status register
 */
static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

/* The start transfer count (STC) is read from the write registers */
static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};

static void esp_set_phase(ESPState *s, uint8_t phase)
{
    s->rregs[ESP_RSTAT] &= ~7;
    s->rregs[ESP_RSTAT] |= phase;

    trace_esp_set_phase(esp_phase_names[phase]);
}

static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    val = esp_fifo_pop(&s->fifo);
    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    esp_fifo_push(&s->fifo, val);

    dmalen--;
    esp_set_tc(s, dmalen);
}

static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
{
    s->pdma_cb = cb;
}
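
/*
 * Select the target encoded in the bus ID register. Returns 0 on success,
 * or -1 (raising a disconnect interrupt) if no device exists at that ID
 */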
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}

static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
            esp_set_tc(s, esp_get_tc(s) - dmalen);
        } else {
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    return dmalen;
}

static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
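
/*
 * Selection handlers: each SELECT variant has a matching *_pdma_cb callback
 * that completes the command once the guest CPU has pushed the outstanding
 * bytes through the PDMA port
 */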
static void satn_pdma_cb(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    int n;

    /* Copy FIFO into cmdfifo */
    n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
    n = MIN(fifo8_num_free(&s->cmdfifo), n);
    fifo8_push_all(&s->cmdfifo, buf, n);

    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    esp_set_pdma_cb(s, SATN_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_set_phase(s, STAT_CD);
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    int n;

    /* Copy FIFO into cmdfifo */
    n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
    n = MIN(fifo8_num_free(&s->cmdfifo), n);
    fifo8_push_all(&s->cmdfifo, buf, n);

    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_set_phase(s, STAT_CD);
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    int n;

    /* Copy FIFO into cmdfifo */
    n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
    n = MIN(fifo8_num_free(&s->cmdfifo), n);
    fifo8_push_all(&s->cmdfifo, buf, n);

    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        esp_set_phase(s, STAT_CD);
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        esp_set_phase(s, STAT_MO);
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_set_phase(s, STAT_MO);
    }
}
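
/*
 * Status phase response for CMD_ICCS: the status byte followed by a zero
 * message byte, delivered by DMA or through the FIFO
 */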
static void write_response_pdma_cb(ESPState *s)
{
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            esp_set_phase(s, STAT_ST);
            s->rregs[ESP_RSTAT] |= STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}

static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Copy FIFO into cmdfifo */
        n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);

        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_RSTAT] |= STAT_TC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        /* Copy device data to FIFO */
        len = MIN(s->async_len, esp_get_tc(s));
        len = MIN(len, fifo8_num_free(&s->fifo));
        fifo8_push_all(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size -= len;
        esp_set_tc(s, esp_get_tc(s) - len);
    }
}
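
/*
 * Main DMA loop: moves up to TC bytes between the SCSI layer's async buffer
 * and guest memory, using the dma_memory_read/write hooks when a real DMA
 * controller is present and falling back to PDMA through the FIFO otherwise;
 * while a command is being gathered (do_cmd) the bytes go to cmdfifo instead
 */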
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti() case: esp_do_dma() is only entered with do_cmd set
         * from handle_ti() (esp_transfer_data() asserts !s->do_cmd), so the
         * incoming bytes are command bytes destined for cmdfifo
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_RSTAT] |= STAT_TC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);

            esp_set_tc(s, esp_get_tc(s) - len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;

            if (s->async_len == 0) {
                scsi_req_continue(s->current_req);
                /*
                 * If there is still data to be read from the device then
                 * complete the DMA operation immediately. Otherwise defer
                 * until the scsi layer has completed.
                 */
                return;
            }

            /* Partially filled a scsi buffer. Complete immediately. */
            esp_dma_done(s);
            esp_lower_drq(s);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);

            esp_set_tc(s, esp_get_tc(s) - len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            if (s->async_len == 0) {
                scsi_req_continue(s->current_req);
                /*
                 * If there is still data to be read from the device then
                 * complete the DMA operation immediately. Otherwise defer
                 * until the scsi layer has completed.
                 */
                if (esp_get_tc(s) != 0 || s->ti_size == 0) {
                    return;
                }
            }

            /* Partially filled a scsi buffer. Complete immediately. */
            esp_dma_done(s);
            esp_lower_drq(s);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
        }
    }
}
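
/*
 * Non-DMA (programmed I/O) transfer: data moves via the FIFO, with a bus
 * service interrupt raised after each step so the guest can drain or refill
 * it through the FIFO register
 */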
static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len, n;

    if (s->do_cmd) {
        /* Copy FIFO into cmdfifo */
        n = esp_fifo_pop_buf(&s->fifo, buf, fifo8_num_used(&s->fifo));
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);

        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_RSTAT] |= STAT_TC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

static void esp_pdma_cb(ESPState *s)
{
    switch (s->pdma_cb) {
    case SATN_PDMA_CB:
        satn_pdma_cb(s);
        break;
    case S_WITHOUT_SATN_PDMA_CB:
        s_without_satn_pdma_cb(s);
        break;
    case SATN_STOP_PDMA_CB:
        satn_stop_pdma_cb(s);
        break;
    case WRITE_RESPONSE_PDMA_CB:
        write_response_pdma_cb(s);
        break;
    case DO_DMA_PDMA_CB:
        do_dma_pdma_cb(s);
        break;
    default:
        g_assert_not_reached();
    }
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        esp_set_phase(s, STAT_ST);
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}
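
/*
 * CMD_TI (Transfer Information): start the next information transfer phase,
 * deferring it if DMA was requested but the DMA engine is not yet enabled
 */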
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter. */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        s->rregs[ESP_RINTR] |= INTR_FC;
        esp_set_phase(s, STAT_MI);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_FC;
        s->rregs[ESP_RSEQ] = 0;
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}
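
/*
 * Register reads can have side effects: reading the FIFO pops a byte (and
 * may advance the SCSI phase), reading RINTR clears the interrupt state and
 * reading TCHI before it has ever been written returns the chip ID
 */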
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
            (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    esp_set_phase(s, STAT_ST);
                    s->rregs[ESP_RSTAT] |= STAT_TC;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            if (!fifo8_is_full(&s->fifo)) {
                esp_fifo_push(&s->fifo, val);
                esp_fifo_push(&s->cmdfifo, fifo8_pop(&s->fifo));
            }

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}
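
/*
 * Migration-version predicates: the effective version of the incoming
 * stream is the minimum of its version_id and the mig_version_id recorded
 * by the source in esp_pre_save()
 */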
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                          OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

/*
 * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
 * guest CPU to perform the transfers between the SCSI bus and memory
 * itself. This is indicated by the dma_memory_read and dma_memory_write
 * functions being NULL (in contrast to the ESP PCI device) whilst
 * dma_enabled is still set.
 */

static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = ESP(opaque);

    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}

static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_esp_pdma,
        NULL
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_pdma_cb(s);
}
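
/*
 * A 16-bit PDMA read pops two bytes from the FIFO; the PDMA callback is
 * only invoked once fewer than 2 bytes remain in the FIFO
 */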
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        esp_pdma_cb(s);
    }
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)