1 /* 2 * QEMU ESP/NCR53C9x emulation 3 * 4 * Copyright (c) 2005-2006 Fabrice Bellard 5 * Copyright (c) 2012 Herve Poussineau 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a copy 8 * of this software and associated documentation files (the "Software"), to deal 9 * in the Software without restriction, including without limitation the rights 10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 * copies of the Software, and to permit persons to whom the Software is 12 * furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice shall be included in 15 * all copies or substantial portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 23 * THE SOFTWARE. 24 */ 25 26 #include "qemu/osdep.h" 27 #include "hw/sysbus.h" 28 #include "migration/vmstate.h" 29 #include "hw/irq.h" 30 #include "hw/scsi/esp.h" 31 #include "trace.h" 32 #include "qemu/log.h" 33 #include "qemu/module.h" 34 35 /* 36 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O), 37 * also produced as NCR89C100. See 38 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt 39 * and 40 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt 41 * 42 * On Macintosh Quadra it is a NCR53C96. 
43 */ 44 45 static void esp_raise_irq(ESPState *s) 46 { 47 if (!(s->rregs[ESP_RSTAT] & STAT_INT)) { 48 s->rregs[ESP_RSTAT] |= STAT_INT; 49 qemu_irq_raise(s->irq); 50 trace_esp_raise_irq(); 51 } 52 } 53 54 static void esp_lower_irq(ESPState *s) 55 { 56 if (s->rregs[ESP_RSTAT] & STAT_INT) { 57 s->rregs[ESP_RSTAT] &= ~STAT_INT; 58 qemu_irq_lower(s->irq); 59 trace_esp_lower_irq(); 60 } 61 } 62 63 static void esp_raise_drq(ESPState *s) 64 { 65 qemu_irq_raise(s->irq_data); 66 trace_esp_raise_drq(); 67 } 68 69 static void esp_lower_drq(ESPState *s) 70 { 71 qemu_irq_lower(s->irq_data); 72 trace_esp_lower_drq(); 73 } 74 75 void esp_dma_enable(ESPState *s, int irq, int level) 76 { 77 if (level) { 78 s->dma_enabled = 1; 79 trace_esp_dma_enable(); 80 if (s->dma_cb) { 81 s->dma_cb(s); 82 s->dma_cb = NULL; 83 } 84 } else { 85 trace_esp_dma_disable(); 86 s->dma_enabled = 0; 87 } 88 } 89 90 void esp_request_cancelled(SCSIRequest *req) 91 { 92 ESPState *s = req->hba_private; 93 94 if (req == s->current_req) { 95 scsi_req_unref(s->current_req); 96 s->current_req = NULL; 97 s->current_dev = NULL; 98 } 99 } 100 101 static uint32_t esp_get_tc(ESPState *s) 102 { 103 uint32_t dmalen; 104 105 dmalen = s->rregs[ESP_TCLO]; 106 dmalen |= s->rregs[ESP_TCMID] << 8; 107 dmalen |= s->rregs[ESP_TCHI] << 16; 108 109 return dmalen; 110 } 111 112 static void esp_set_tc(ESPState *s, uint32_t dmalen) 113 { 114 s->rregs[ESP_TCLO] = dmalen; 115 s->rregs[ESP_TCMID] = dmalen >> 8; 116 s->rregs[ESP_TCHI] = dmalen >> 16; 117 } 118 119 static void set_pdma(ESPState *s, enum pdma_origin_id origin, 120 uint32_t index, uint32_t len) 121 { 122 s->pdma_origin = origin; 123 s->pdma_start = index; 124 s->pdma_cur = index; 125 s->pdma_len = len; 126 } 127 128 static uint8_t *get_pdma_buf(ESPState *s) 129 { 130 switch (s->pdma_origin) { 131 case PDMA: 132 return s->pdma_buf; 133 case TI: 134 return s->ti_buf; 135 case CMD: 136 return s->cmdbuf; 137 case ASYNC: 138 return s->async_buf; 139 } 140 return NULL; 141 } 142 
/*
 * Common selection bookkeeping: reset the TI buffer pointers, cancel
 * any still-outstanding request and look up the selected target.
 * Returns 0 on success, or -1 (after raising a disconnect interrupt)
 * when no device answers to the target id.
 */
static int get_cmd_cb(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive: signal disconnect to the guest */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }
    return 0;
}

/*
 * Fetch a CDB into buf: via real DMA, via PDMA (returns 0 and finishes
 * later in the armed pdma callback), or from the TI FIFO for PIO.
 * Returns the number of bytes fetched, or 0 if the command is not yet
 * available.
 */
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = esp_get_tc(s);
        if (dmalen > buflen) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            /*
             * NOTE(review): this copies from the caller's buffer (which
             * may be uninitialized at this point) into pdma_buf before
             * the PDMA port has produced any data; only the set_pdma()
             * bookkeeping appears meaningful here — confirm against the
             * PDMA write path.
             */
            memcpy(s->pdma_buf, buf, dmalen);
            set_pdma(s, PDMA, 0, dmalen);
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        /* presumably extracts the LUN field from CDB byte 2 — TODO confirm */
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    if (get_cmd_cb(s) < 0) {
        return 0;
    }
    return dmalen;
}

/*
 * Hand the CDB in buf to the SCSI layer; the low bits of busid select
 * the LUN.  Sets up status/interrupt registers for the data phase.
 */
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            /* device -> host: data-in phase */
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            /* host -> device: data-out phase */
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

/* Dispatch a command whose first byte is the bus-id/message byte. */
static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}

/* PDMA completion for "select with ATN": run the accumulated CDB. */
static void satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_cmd(s, get_pdma_buf(s) + s->pdma_start);
    }
}

/* SELATN command: select the target with ATN and send the CDB. */
static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        /* Park until the external DMA engine is enabled */
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_cmd(s, buf);
    }
}

/* PDMA completion for "select without ATN": LUN 0 is implied. */
static void s_without_satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
    }
}

/* SEL command: select the target without ATN. */
static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        /* Park until the external DMA engine is enabled */
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}

/* PDMA completion for "select with ATN and stop": latch the CDB only. */
static void satn_stop_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    s->cmdlen = s->pdma_cur - s->pdma_start;
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        /* Execution is deferred until a later TI command */
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

/* SELATNS command: select with ATN, stop after the message byte. */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        /* Park until the external DMA engine is enabled */
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        /* Execution is deferred until a later TI command */
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

/* PDMA completion for the ICCS status/message-in pair. */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

/*
 * ICCS command: send the status byte followed by a zero message byte,
 * via real DMA, via PDMA, or through the FIFO for PIO.
 */
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            set_pdma(s, TI, 0, 2);
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            /* IRQ is raised later by the pdma callback instead */
            return;
        }
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

/* Signal terminal count: the DMA transfer has finished. */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}

/*
 * PDMA completion for a data-phase transfer, or for a command that was
 * accumulated via PDMA (do_cmd set).
 */
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len = s->pdma_cur - s->pdma_start;
    if (s->do_cmd) {
        /* Deferred CDB now complete: execute it */
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
396 */ 397 if (to_device || s->dma_left != 0 || s->ti_size == 0) { 398 return; 399 } 400 } 401 402 /* Partially filled a scsi buffer. Complete immediately. */ 403 esp_dma_done(s); 404 } 405 406 static void esp_do_dma(ESPState *s) 407 { 408 uint32_t len; 409 int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO); 410 411 len = s->dma_left; 412 if (s->do_cmd) { 413 /* 414 * handle_ti_cmd() case: esp_do_dma() is called only from 415 * handle_ti_cmd() with do_cmd != NULL (see the assert()) 416 */ 417 trace_esp_do_dma(s->cmdlen, len); 418 assert(s->cmdlen <= sizeof(s->cmdbuf) && 419 len <= sizeof(s->cmdbuf) - s->cmdlen); 420 if (s->dma_memory_read) { 421 s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len); 422 } else { 423 set_pdma(s, CMD, s->cmdlen, len); 424 s->pdma_cb = do_dma_pdma_cb; 425 esp_raise_drq(s); 426 return; 427 } 428 trace_esp_handle_ti_cmd(s->cmdlen); 429 s->ti_size = 0; 430 s->cmdlen = 0; 431 s->do_cmd = 0; 432 do_cmd(s, s->cmdbuf); 433 return; 434 } 435 if (s->async_len == 0) { 436 /* Defer until data is available. */ 437 return; 438 } 439 if (len > s->async_len) { 440 len = s->async_len; 441 } 442 if (to_device) { 443 if (s->dma_memory_read) { 444 s->dma_memory_read(s->dma_opaque, s->async_buf, len); 445 } else { 446 set_pdma(s, ASYNC, 0, len); 447 s->pdma_cb = do_dma_pdma_cb; 448 esp_raise_drq(s); 449 return; 450 } 451 } else { 452 if (s->dma_memory_write) { 453 s->dma_memory_write(s->dma_opaque, s->async_buf, len); 454 } else { 455 set_pdma(s, ASYNC, 0, len); 456 s->pdma_cb = do_dma_pdma_cb; 457 esp_raise_drq(s); 458 return; 459 } 460 } 461 s->dma_left -= len; 462 s->async_buf += len; 463 s->async_len -= len; 464 if (to_device) { 465 s->ti_size += len; 466 } else { 467 s->ti_size -= len; 468 } 469 if (s->async_len == 0) { 470 scsi_req_continue(s->current_req); 471 /* 472 * If there is still data to be read from the device then 473 * complete the DMA operation immediately. Otherwise defer 474 * until the scsi layer has completed. 
475 */ 476 if (to_device || s->dma_left != 0 || s->ti_size == 0) { 477 return; 478 } 479 } 480 481 /* Partially filled a scsi buffer. Complete immediately. */ 482 esp_dma_done(s); 483 } 484 485 static void esp_report_command_complete(ESPState *s, uint32_t status) 486 { 487 trace_esp_command_complete(); 488 if (s->ti_size != 0) { 489 trace_esp_command_complete_unexpected(); 490 } 491 s->ti_size = 0; 492 s->dma_left = 0; 493 s->async_len = 0; 494 if (status) { 495 trace_esp_command_complete_fail(); 496 } 497 s->status = status; 498 s->rregs[ESP_RSTAT] = STAT_ST; 499 esp_dma_done(s); 500 if (s->current_req) { 501 scsi_req_unref(s->current_req); 502 s->current_req = NULL; 503 s->current_dev = NULL; 504 } 505 } 506 507 void esp_command_complete(SCSIRequest *req, size_t resid) 508 { 509 ESPState *s = req->hba_private; 510 511 if (s->rregs[ESP_RSTAT] & STAT_INT) { 512 /* 513 * Defer handling command complete until the previous 514 * interrupt has been handled. 515 */ 516 trace_esp_command_complete_deferred(); 517 s->deferred_status = req->status; 518 s->deferred_complete = true; 519 return; 520 } 521 esp_report_command_complete(s, req->status); 522 } 523 524 void esp_transfer_data(SCSIRequest *req, uint32_t len) 525 { 526 ESPState *s = req->hba_private; 527 528 assert(!s->do_cmd); 529 trace_esp_transfer_data(s->dma_left, s->ti_size); 530 s->async_len = len; 531 s->async_buf = scsi_req_get_buf(req); 532 if (s->dma_left) { 533 esp_do_dma(s); 534 } else if (s->dma_counter != 0 && s->ti_size <= 0) { 535 /* 536 * If this was the last part of a DMA transfer then the 537 * completion interrupt is deferred to here. 
538 */ 539 esp_dma_done(s); 540 } 541 } 542 543 static void handle_ti(ESPState *s) 544 { 545 uint32_t dmalen, minlen; 546 547 if (s->dma && !s->dma_enabled) { 548 s->dma_cb = handle_ti; 549 return; 550 } 551 552 dmalen = esp_get_tc(s); 553 if (dmalen == 0) { 554 dmalen = 0x10000; 555 } 556 s->dma_counter = dmalen; 557 558 if (s->do_cmd) { 559 minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ; 560 } else if (s->ti_size < 0) { 561 minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size; 562 } else { 563 minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size; 564 } 565 trace_esp_handle_ti(minlen); 566 if (s->dma) { 567 s->dma_left = minlen; 568 s->rregs[ESP_RSTAT] &= ~STAT_TC; 569 esp_do_dma(s); 570 } else if (s->do_cmd) { 571 trace_esp_handle_ti_cmd(s->cmdlen); 572 s->ti_size = 0; 573 s->cmdlen = 0; 574 s->do_cmd = 0; 575 do_cmd(s, s->cmdbuf); 576 } 577 } 578 579 void esp_hard_reset(ESPState *s) 580 { 581 memset(s->rregs, 0, ESP_REGS); 582 memset(s->wregs, 0, ESP_REGS); 583 s->tchi_written = 0; 584 s->ti_size = 0; 585 s->ti_rptr = 0; 586 s->ti_wptr = 0; 587 s->dma = 0; 588 s->do_cmd = 0; 589 s->dma_cb = NULL; 590 591 s->rregs[ESP_CFG1] = 7; 592 } 593 594 static void esp_soft_reset(ESPState *s) 595 { 596 qemu_irq_lower(s->irq); 597 qemu_irq_lower(s->irq_data); 598 esp_hard_reset(s); 599 } 600 601 static void parent_esp_reset(ESPState *s, int irq, int level) 602 { 603 if (level) { 604 esp_soft_reset(s); 605 } 606 } 607 608 uint64_t esp_reg_read(ESPState *s, uint32_t saddr) 609 { 610 uint32_t val; 611 612 switch (saddr) { 613 case ESP_FIFO: 614 if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) { 615 /* Data out. 
             */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            /* Pop the next byte from the TI buffer */
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            /* Buffer drained: rewind the pointers */
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            /* Deliver the completion that was held back by STAT_INT */
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

/* Write one ESP register; saddr is the register index. */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing the transfer counter clears terminal count */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* CDB bytes accumulate in the command buffer */
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.
             */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        /* Decode and execute the command opcode */
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /*s->ti_size = 0;*/
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                /* Interrupt only when bus-reset reporting is enabled */
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Configuration registers read back what was written */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

/* Only byte accesses (plus 32-bit writes) are valid on the registers. */
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

/* The PDMA subsection migrates only when PDMA is actually in use. */
static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = opaque;
    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}

/* Migration state for an in-flight PDMA transfer. */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(pdma_buf, ESPState),
        VMSTATE_INT32(pdma_origin, ESPState),
        VMSTATE_UINT32(pdma_len, ESPState),
        VMSTATE_UINT32(pdma_start, ESPState),
        VMSTATE_UINT32(pdma_cur, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

/* Record the vmstate version in the outgoing migration stream. */
static int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

/* Normalise the recorded version after an incoming migration. */
static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

/* Migration state for the ESP chip core. */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .pre_save = esp_pre_save,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};

/* MMIO register write; it_shift scales the register stride on the bus. */
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

/* MMIO register read; it_shift scales the register stride on the bus. */
static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

/* PDMA port write: the CPU pushes transfer bytes directly. */
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t dmalen;
    uint8_t *buf = get_pdma_buf(s);

    trace_esp_pdma_write(size);

    dmalen = esp_get_tc(s);
    if (dmalen == 0 || s->pdma_len == 0) {
        return;
    }
    switch (size) {
    case 1:
        buf[s->pdma_cur++] = val;
        s->pdma_len--;
        dmalen--;
        break;
    case 2:
        /*
         * NOTE(review): a 16-bit access arriving with pdma_len == 1
         * would underflow pdma_len and overrun the buffer — confirm
         * that guests cannot trigger this.
         */
        buf[s->pdma_cur++] = val >> 8;
        buf[s->pdma_cur++] = val;
        s->pdma_len -= 2;
        dmalen -= 2;
        break;
    }
    esp_set_tc(s, dmalen);
    if (s->pdma_len == 0
&& s->pdma_cb) { 922 esp_lower_drq(s); 923 s->pdma_cb(s); 924 s->pdma_cb = NULL; 925 } 926 } 927 928 static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr, 929 unsigned int size) 930 { 931 SysBusESPState *sysbus = opaque; 932 ESPState *s = ESP(&sysbus->esp); 933 uint8_t *buf = get_pdma_buf(s); 934 uint64_t val = 0; 935 936 trace_esp_pdma_read(size); 937 938 if (s->pdma_len == 0) { 939 return 0; 940 } 941 switch (size) { 942 case 1: 943 val = buf[s->pdma_cur++]; 944 s->pdma_len--; 945 break; 946 case 2: 947 val = buf[s->pdma_cur++]; 948 val = (val << 8) | buf[s->pdma_cur++]; 949 s->pdma_len -= 2; 950 break; 951 } 952 953 if (s->pdma_len == 0 && s->pdma_cb) { 954 esp_lower_drq(s); 955 s->pdma_cb(s); 956 s->pdma_cb = NULL; 957 } 958 return val; 959 } 960 961 static const MemoryRegionOps sysbus_esp_pdma_ops = { 962 .read = sysbus_esp_pdma_read, 963 .write = sysbus_esp_pdma_write, 964 .endianness = DEVICE_NATIVE_ENDIAN, 965 .valid.min_access_size = 1, 966 .valid.max_access_size = 2, 967 }; 968 969 static const struct SCSIBusInfo esp_scsi_info = { 970 .tcq = false, 971 .max_target = ESP_MAX_DEVS, 972 .max_lun = 7, 973 974 .transfer_data = esp_transfer_data, 975 .complete = esp_command_complete, 976 .cancel = esp_request_cancelled 977 }; 978 979 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level) 980 { 981 SysBusESPState *sysbus = SYSBUS_ESP(opaque); 982 ESPState *s = ESP(&sysbus->esp); 983 984 switch (irq) { 985 case 0: 986 parent_esp_reset(s, irq, level); 987 break; 988 case 1: 989 esp_dma_enable(opaque, irq, level); 990 break; 991 } 992 } 993 994 static void sysbus_esp_realize(DeviceState *dev, Error **errp) 995 { 996 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 997 SysBusESPState *sysbus = SYSBUS_ESP(dev); 998 ESPState *s = ESP(&sysbus->esp); 999 1000 if (!qdev_realize(DEVICE(s), NULL, errp)) { 1001 return; 1002 } 1003 1004 sysbus_init_irq(sbd, &s->irq); 1005 sysbus_init_irq(sbd, &s->irq_data); 1006 assert(sysbus->it_shift != -1); 1007 1008 
    s->chip_id = TCHI_FAS100A;
    /* Register bank, stride scaled by it_shift */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    /* Separate 2-byte window for programmed DMA */
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 2);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

/* DeviceClass.reset handler for the sysbus wrapper. */
static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

/* Instance init: embed the core ESP device as a child object. */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

/* Migration state for the sysbus wrapper (embeds the chip state). */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

/* Core ESP chip type; always embedded in a bus-specific wrapper. */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

/* Register both the sysbus wrapper and the core chip type. */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)