1 /* 2 * QEMU ESP/NCR53C9x emulation 3 * 4 * Copyright (c) 2005-2006 Fabrice Bellard 5 * Copyright (c) 2012 Herve Poussineau 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a copy 8 * of this software and associated documentation files (the "Software"), to deal 9 * in the Software without restriction, including without limitation the rights 10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 * copies of the Software, and to permit persons to whom the Software is 12 * furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice shall be included in 15 * all copies or substantial portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 23 * THE SOFTWARE. 24 */ 25 26 #include "qemu/osdep.h" 27 #include "hw/sysbus.h" 28 #include "migration/vmstate.h" 29 #include "hw/irq.h" 30 #include "hw/scsi/esp.h" 31 #include "trace.h" 32 #include "qemu/log.h" 33 #include "qemu/module.h" 34 35 /* 36 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O), 37 * also produced as NCR89C100. See 38 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt 39 * and 40 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt 41 * 42 * On Macintosh Quadra it is a NCR53C96. 
 */

/* Assert the chip interrupt line and latch STAT_INT (no-op if already set) */
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

/* Deassert the chip interrupt line and clear STAT_INT (no-op if already clear) */
static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

/* Raise DRQ: signal the board glue that PDMA data can be transferred */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

/* Lower DRQ once the PDMA data has been consumed */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

/*
 * DMA gating from the board glue: when DMA becomes enabled, execute (and
 * clear) any command callback that was deferred while DMA was disabled.
 */
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

/* SCSI layer cancel callback: drop our reference to the in-flight request */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

/* Push one byte, logging (and dropping the byte) on FIFO overrun */
static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
{
    if (fifo8_num_used(fifo) == fifo->capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(fifo, val);
}

/* Pop one byte; an empty FIFO reads as 0 rather than being an error */
static uint8_t esp_fifo_pop(Fifo8 *fifo)
{
    if (fifo8_is_empty(fifo)) {
        return 0;
    }

    return fifo8_pop(fifo);
}

/*
 * Pop up to maxlen bytes into dest (dest may be NULL to discard them).
 * Returns the number of bytes actually popped: fifo8_pop_buf() can return
 * fewer than maxlen when the data wraps around the ring buffer, so callers
 * that need everything must retry with the remainder.
 */
static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n;

    if (maxlen == 0) {
        return 0;
    }

    buf = fifo8_pop_buf(fifo, maxlen, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    return n;
}

/* Read the 24-bit transfer counter from the read registers */
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

/* Store the 24-bit transfer counter back into the read registers */
static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
}

/* Read the 24-bit start transfer count as programmed via the write registers */
static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

/* PDMA read: drain from cmdfifo while a command is being gathered, else fifo */
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    if (s->do_cmd) {
        val = esp_fifo_pop(&s->cmdfifo);
    } else {
        val = esp_fifo_pop(&s->fifo);
    }

    return val;
}

/* PDMA write: one byte into cmdfifo/fifo, decrementing the transfer counter */
static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        esp_fifo_push(&s->cmdfifo, val);
    } else {
        esp_fifo_push(&s->fifo, val);
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}

/*
 * Begin selection of the target addressed by ESP_WBUSID.
 * Returns 0 on success, or -1 (with a disconnect interrupt raised) when no
 * device exists at that target id.
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_busid_cmd() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}

/*
 * Gather up to maxlen command bytes into cmdfifo: via real DMA when the
 * board provides dma_memory_read, via PDMA otherwise (returns 0 with DRQ
 * raised; the bytes arrive later through esp_pdma_write()), or straight
 * from the FIFO for non-DMA transfers.  Returns the number of bytes
 * collected, 0 if they are still to come, or -1 if selection failed.
 * NOTE(review): the -1 is returned through a uint32_t; callers store it in
 * an int32_t and compare signed, so it works — but the types look off.
 */
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        if (n >= 3) {
            /* presumably rewrites the leading byte from the CDB's LUN
             * field (byte 2, top 3 bits) — TODO confirm against the
             * NCR53C9x programming model */
            buf[0] = buf[2] >> 5;
        }
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}

/*
 * Hand the gathered CDB in cmdfifo to the SCSI layer for lun (busid & 7)
 * and switch to the DATA IN/OUT phase implied by the request's direction.
 * NOTE(review): current_lun is not checked for NULL before scsi_req_new();
 * an out-of-range lun would pass a NULL device down — verify against the
 * SCSI core's expectations.
 */
static void do_busid_cmd(ESPState *s, uint8_t busid)
{
    uint32_t cmdlen;
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    cmdlen = fifo8_num_used(&s->cmdfifo);
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

/* Strip the message byte(s) preceding the CDB in cmdfifo and execute it */
static void do_cmd(ESPState *s)
{
    uint8_t busid = fifo8_pop(&s->cmdfifo);

    s->cmdfifo_cdb_offset--;

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        esp_fifo_pop_buf(&s->cmdfifo, NULL, s->cmdfifo_cdb_offset);
        s->cmdfifo_cdb_offset = 0;
    }

    do_busid_cmd(s, busid);
}

/* PDMA completion for Select-with-ATN: run the command once bytes arrived */
static void satn_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    }
}

/* SELATN command: one message byte precedes the CDB */
static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        /* defer until the DMA controller enables us */
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

/* PDMA completion for Select-without-ATN: CDB only, no message byte */
static void s_without_satn_pdma_cb(ESPState *s)
{
    uint32_t len;

    s->do_cmd = 0;
    len = fifo8_num_used(&s->cmdfifo);
    if (len) {
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    }
}

/* SEL command: CDB only, implied lun 0 */
static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

/* PDMA completion for Select-with-ATN-and-stop: stop after the message byte */
static void satn_stop_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

/* SELATNS command: transfer only the message byte, then stop for the CDB */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}

/* PDMA completion for ICCS: status/message bytes have been drained */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

/* ICCS command: deliver the status byte followed by a null message byte */
static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* No DMA hook: the two bytes will be collected via PDMA */
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

/* Signal DMA transfer completion: TC reached zero, raise INTR_BS */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}

/*
 * PDMA callback for TI data transfers: move bytes between the FIFO and the
 * SCSI layer's async buffer in whichever direction the current phase
 * dictates, continuing the SCSI request whenever its buffer fills/empties.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* PDMA was feeding command bytes: execute the command now */
        s->ti_size = 0;
        s->do_cmd = 0;
        do_cmd(s);
        esp_lower_drq(s);
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}

/*
 * Perform one step of a DMA TI (Transfer Information) operation, either
 * through the board-provided dma_memory_read/write hooks or by staging
 * the data for PDMA when no hooks are registered.
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}

/* Perform one step of a non-DMA TI operation through the 16-byte FIFO */
static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        len = MIN(s->ti_size, s->async_len);
        len = MIN(len, fifo8_num_free(&s->fifo));
        fifo8_push_all(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size -= len;
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);

        if (to_device || s->ti_size == 0) {
            return;
        }
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

/* SCSI layer completion callback: latch status and enter STATUS phase */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    esp_lower_drq(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

/*
 * SCSI layer data callback: the next len bytes of the request's buffer are
 * ready to be transferred in the direction given by the current bus phase.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);

        /*
         * If data is ready to transfer and the TI command has already
         * been executed, start DMA immediately. Otherwise DMA will start
         * when host sends the TI command
         */
        if (s->ti_size && (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA))) {
            esp_do_dma(s);
        }
        return;
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd & CMD_DMA) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else {
        esp_do_nodma(s);
    }
}

/* TI command: kick off the transfer in DMA or non-DMA mode */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

/* Full chip reset: clear all registers, FIFOs and transfer state */
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

/* Soft reset: as hard reset, but also drop both interrupt lines */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

/* Reset line from the parent device (level-triggered) */
static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

/* Read one ESP register; some reads have side effects (FIFO pop, INTR clear) */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
            (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

/* Write one ESP register; a write to ESP_CMD dispatches the command */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);
        } else {
            esp_fifo_push(&s->fifo, val);
        }

        /* Non-DMA transfers raise an interrupt after every byte */
        if (s->rregs[ESP_CMD] == CMD_TI) {
            s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS;
            esp_raise_irq(s);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            if (esp_get_stc(s) == 0) {
                /* a start count of zero means a maximum (0x10000) transfer */
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            /* only interrupt if bus-reset reporting is not disabled */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

/* Register bank accepts byte accesses, plus 32-bit writes */
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

/* Migration: true when the stream uses the pre-version-5 layout */
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

/* Migration: true when the stream uses the version-5 layout */
static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id == 5;
}

/* Record the on-the-wire version before saving (called by parent devices) */
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

/* Convert pre-version-5 migration state (flat buffers) into the FIFOs */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_END_OF_LIST()
    },
};

/* MMIO write to the register bank: register index derived via it_shift */
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

/* MMIO read from the register bank */
static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}
1175 1176 static const MemoryRegionOps sysbus_esp_mem_ops = { 1177 .read = sysbus_esp_mem_read, 1178 .write = sysbus_esp_mem_write, 1179 .endianness = DEVICE_NATIVE_ENDIAN, 1180 .valid.accepts = esp_mem_accepts, 1181 }; 1182 1183 static void sysbus_esp_pdma_write(void *opaque, hwaddr addr, 1184 uint64_t val, unsigned int size) 1185 { 1186 SysBusESPState *sysbus = opaque; 1187 ESPState *s = ESP(&sysbus->esp); 1188 uint32_t dmalen; 1189 1190 trace_esp_pdma_write(size); 1191 1192 switch (size) { 1193 case 1: 1194 esp_pdma_write(s, val); 1195 break; 1196 case 2: 1197 esp_pdma_write(s, val >> 8); 1198 esp_pdma_write(s, val); 1199 break; 1200 } 1201 dmalen = esp_get_tc(s); 1202 if (dmalen == 0 || fifo8_num_free(&s->fifo) < 2) { 1203 s->pdma_cb(s); 1204 } 1205 } 1206 1207 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr, 1208 unsigned int size) 1209 { 1210 SysBusESPState *sysbus = opaque; 1211 ESPState *s = ESP(&sysbus->esp); 1212 uint64_t val = 0; 1213 1214 trace_esp_pdma_read(size); 1215 1216 switch (size) { 1217 case 1: 1218 val = esp_pdma_read(s); 1219 break; 1220 case 2: 1221 val = esp_pdma_read(s); 1222 val = (val << 8) | esp_pdma_read(s); 1223 break; 1224 } 1225 if (fifo8_num_used(&s->fifo) < 2) { 1226 s->pdma_cb(s); 1227 } 1228 return val; 1229 } 1230 1231 static const MemoryRegionOps sysbus_esp_pdma_ops = { 1232 .read = sysbus_esp_pdma_read, 1233 .write = sysbus_esp_pdma_write, 1234 .endianness = DEVICE_NATIVE_ENDIAN, 1235 .valid.min_access_size = 1, 1236 .valid.max_access_size = 4, 1237 .impl.min_access_size = 1, 1238 .impl.max_access_size = 2, 1239 }; 1240 1241 static const struct SCSIBusInfo esp_scsi_info = { 1242 .tcq = false, 1243 .max_target = ESP_MAX_DEVS, 1244 .max_lun = 7, 1245 1246 .transfer_data = esp_transfer_data, 1247 .complete = esp_command_complete, 1248 .cancel = esp_request_cancelled 1249 }; 1250 1251 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level) 1252 { 1253 SysBusESPState *sysbus = SYSBUS_ESP(opaque); 1254 
ESPState *s = ESP(&sysbus->esp); 1255 1256 switch (irq) { 1257 case 0: 1258 parent_esp_reset(s, irq, level); 1259 break; 1260 case 1: 1261 esp_dma_enable(opaque, irq, level); 1262 break; 1263 } 1264 } 1265 1266 static void sysbus_esp_realize(DeviceState *dev, Error **errp) 1267 { 1268 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 1269 SysBusESPState *sysbus = SYSBUS_ESP(dev); 1270 ESPState *s = ESP(&sysbus->esp); 1271 1272 if (!qdev_realize(DEVICE(s), NULL, errp)) { 1273 return; 1274 } 1275 1276 sysbus_init_irq(sbd, &s->irq); 1277 sysbus_init_irq(sbd, &s->irq_data); 1278 assert(sysbus->it_shift != -1); 1279 1280 s->chip_id = TCHI_FAS100A; 1281 memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops, 1282 sysbus, "esp-regs", ESP_REGS << sysbus->it_shift); 1283 sysbus_init_mmio(sbd, &sysbus->iomem); 1284 memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops, 1285 sysbus, "esp-pdma", 4); 1286 sysbus_init_mmio(sbd, &sysbus->pdma); 1287 1288 qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2); 1289 1290 scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL); 1291 } 1292 1293 static void sysbus_esp_hard_reset(DeviceState *dev) 1294 { 1295 SysBusESPState *sysbus = SYSBUS_ESP(dev); 1296 ESPState *s = ESP(&sysbus->esp); 1297 1298 esp_hard_reset(s); 1299 } 1300 1301 static void sysbus_esp_init(Object *obj) 1302 { 1303 SysBusESPState *sysbus = SYSBUS_ESP(obj); 1304 1305 object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP); 1306 } 1307 1308 static const VMStateDescription vmstate_sysbus_esp_scsi = { 1309 .name = "sysbusespscsi", 1310 .version_id = 2, 1311 .minimum_version_id = 1, 1312 .pre_save = esp_pre_save, 1313 .fields = (VMStateField[]) { 1314 VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2), 1315 VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState), 1316 VMSTATE_END_OF_LIST() 1317 } 1318 }; 1319 1320 static void sysbus_esp_class_init(ObjectClass *klass, void *data) 1321 { 1322 DeviceClass *dc = 
DEVICE_CLASS(klass); 1323 1324 dc->realize = sysbus_esp_realize; 1325 dc->reset = sysbus_esp_hard_reset; 1326 dc->vmsd = &vmstate_sysbus_esp_scsi; 1327 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 1328 } 1329 1330 static const TypeInfo sysbus_esp_info = { 1331 .name = TYPE_SYSBUS_ESP, 1332 .parent = TYPE_SYS_BUS_DEVICE, 1333 .instance_init = sysbus_esp_init, 1334 .instance_size = sizeof(SysBusESPState), 1335 .class_init = sysbus_esp_class_init, 1336 }; 1337 1338 static void esp_finalize(Object *obj) 1339 { 1340 ESPState *s = ESP(obj); 1341 1342 fifo8_destroy(&s->fifo); 1343 fifo8_destroy(&s->cmdfifo); 1344 } 1345 1346 static void esp_init(Object *obj) 1347 { 1348 ESPState *s = ESP(obj); 1349 1350 fifo8_create(&s->fifo, ESP_FIFO_SZ); 1351 fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ); 1352 } 1353 1354 static void esp_class_init(ObjectClass *klass, void *data) 1355 { 1356 DeviceClass *dc = DEVICE_CLASS(klass); 1357 1358 /* internal device for sysbusesp/pciespscsi, not user-creatable */ 1359 dc->user_creatable = false; 1360 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 1361 } 1362 1363 static const TypeInfo esp_info = { 1364 .name = TYPE_ESP, 1365 .parent = TYPE_DEVICE, 1366 .instance_init = esp_init, 1367 .instance_finalize = esp_finalize, 1368 .instance_size = sizeof(ESPState), 1369 .class_init = esp_class_init, 1370 }; 1371 1372 static void esp_register_types(void) 1373 { 1374 type_register_static(&sysbus_esp_info); 1375 type_register_static(&esp_info); 1376 } 1377 1378 type_init(esp_register_types) 1379