1 /* 2 * QEMU PowerMac CUDA device support 3 * 4 * Copyright (c) 2004-2007 Fabrice Bellard 5 * Copyright (c) 2007 Jocelyn Mayer 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a copy 8 * of this software and associated documentation files (the "Software"), to deal 9 * in the Software without restriction, including without limitation the rights 10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 * copies of the Software, and to permit persons to whom the Software is 12 * furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice shall be included in 15 * all copies or substantial portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 23 * THE SOFTWARE. 24 */ 25 #include "qemu/osdep.h" 26 #include "hw/hw.h" 27 #include "hw/ppc/mac.h" 28 #include "hw/input/adb.h" 29 #include "qemu/timer.h" 30 #include "sysemu/sysemu.h" 31 #include "qemu/cutils.h" 32 #include "qemu/log.h" 33 34 /* XXX: implement all timer modes */ 35 36 /* debug CUDA */ 37 //#define DEBUG_CUDA 38 39 /* debug CUDA packets */ 40 //#define DEBUG_CUDA_PACKET 41 42 #ifdef DEBUG_CUDA 43 #define CUDA_DPRINTF(fmt, ...) \ 44 do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0) 45 #else 46 #define CUDA_DPRINTF(fmt, ...) 47 #endif 48 49 /* Bits in B data register: all active low */ 50 #define TREQ 0x08 /* Transfer request (input) */ 51 #define TACK 0x10 /* Transfer acknowledge (output) */ 52 #define TIP 0x20 /* Transfer in progress (output) */ 53 54 /* Bits in ACR */ 55 #define SR_CTRL 0x1c /* Shift register control bits */ 56 #define SR_EXT 0x0c /* Shift on external clock */ 57 #define SR_OUT 0x10 /* Shift out if 1 */ 58 59 /* Bits in IFR and IER */ 60 #define IER_SET 0x80 /* set bits in IER */ 61 #define IER_CLR 0 /* clear bits in IER */ 62 #define SR_INT 0x04 /* Shift register full/empty */ 63 #define SR_DATA_INT 0x08 64 #define SR_CLOCK_INT 0x10 65 #define T1_INT 0x40 /* Timer 1 interrupt */ 66 #define T2_INT 0x20 /* Timer 2 interrupt */ 67 68 /* Bits in ACR */ 69 #define T1MODE 0xc0 /* Timer 1 mode */ 70 #define T1MODE_CONT 0x40 /* continuous interrupts */ 71 72 /* commands (1st byte) */ 73 #define ADB_PACKET 0 74 #define CUDA_PACKET 1 75 #define ERROR_PACKET 2 76 #define TIMER_PACKET 3 77 #define POWER_PACKET 4 78 #define MACIIC_PACKET 5 79 #define PMU_PACKET 6 80 81 82 /* CUDA commands (2nd byte) */ 83 #define CUDA_WARM_START 0x0 84 #define CUDA_AUTOPOLL 0x1 85 #define CUDA_GET_6805_ADDR 0x2 86 #define CUDA_GET_TIME 0x3 87 #define CUDA_GET_PRAM 0x7 88 #define CUDA_SET_6805_ADDR 0x8 89 #define CUDA_SET_TIME 0x9 90 #define CUDA_POWERDOWN 0xa 91 #define CUDA_POWERUP_TIME 0xb 92 #define CUDA_SET_PRAM 0xc 93 #define CUDA_MS_RESET 0xd 94 #define CUDA_SEND_DFAC 0xe 95 #define CUDA_BATTERY_SWAP_SENSE 0x10 96 #define CUDA_RESET_SYSTEM 0x11 97 #define CUDA_SET_IPL 0x12 98 #define CUDA_FILE_SERVER_FLAG 0x13 99 #define CUDA_SET_AUTO_RATE 0x14 100 #define CUDA_GET_AUTO_RATE 0x16 101 #define CUDA_SET_DEVICE_LIST 0x19 102 #define CUDA_GET_DEVICE_LIST 0x1a 103 #define CUDA_SET_ONE_SECOND_MODE 0x1b 104 #define CUDA_SET_POWER_MESSAGES 0x21 105 #define CUDA_GET_SET_IIC 0x22 106 #define CUDA_WAKEUP 0x23 107 #define CUDA_TIMER_TICKLE 0x24 108 #define CUDA_COMBINED_FORMAT_IIC 0x25 109 110 #define CUDA_TIMER_FREQ (4700000 / 6) 111 112 /* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */ 113 #define RTC_OFFSET 2082844800 114 115 /* CUDA registers */ 116 #define CUDA_REG_B 0x00 117 #define CUDA_REG_A 0x01 118 #define CUDA_REG_DIRB 0x02 119 #define CUDA_REG_DIRA 0x03 120 #define CUDA_REG_T1CL 0x04 121 #define CUDA_REG_T1CH 0x05 122 #define CUDA_REG_T1LL 0x06 123 #define CUDA_REG_T1LH 0x07 124 #define CUDA_REG_T2CL 0x08 125 #define CUDA_REG_T2CH 0x09 126 #define CUDA_REG_SR 0x0a 127 #define CUDA_REG_ACR 0x0b 128 #define CUDA_REG_PCR 0x0c 129 #define CUDA_REG_IFR 0x0d 130 #define CUDA_REG_IER 0x0e 131 #define CUDA_REG_ANH 0x0f 132 133 static void cuda_update(CUDAState *s); 134 static void cuda_receive_packet_from_host(CUDAState *s, 135 const uint8_t *data, int len); 136 static void cuda_timer_update(CUDAState *s, CUDATimer *ti, 137 int64_t current_time); 138 139 static void cuda_update_irq(CUDAState *s) 140 { 141 if (s->ifr & s->ier & (SR_INT | T1_INT | T2_INT)) { 142 qemu_irq_raise(s->irq); 143 } else { 144 qemu_irq_lower(s->irq); 145 } 146 } 147 148 static uint64_t get_counter_value(CUDAState *s, CUDATimer *ti) 149 { 150 /* Reverse of the tb calculation algorithm that Mac OS X uses on bootup */ 151 uint64_t tb_diff = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 152 s->tb_frequency, NANOSECONDS_PER_SECOND) - 153 ti->load_time; 154 155 return (tb_diff * 0xBF401675E5DULL) / (s->tb_frequency << 24); 156 } 157 158 static uint64_t get_counter_load_time(CUDAState *s, CUDATimer *ti) 159 { 160 uint64_t load_time = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 161 s->tb_frequency, NANOSECONDS_PER_SECOND); 162 return load_time; 163 } 164 165 static unsigned int get_counter(CUDAState *s, CUDATimer *ti) 166 { 167 int64_t d; 168 unsigned int counter; 169 170 d = get_counter_value(s, ti); 171 172 if (ti->index == 0) { 173 /* the timer goes down from latch to -1 (period of latch + 2) */ 174 if (d <= (ti->counter_value + 1)) { 175 counter = (ti->counter_value - d) & 0xffff; 176 } else { 177 counter = (d - (ti->counter_value + 1)) % (ti->latch + 2); 178 counter = (ti->latch - counter) & 0xffff; 179 } 180 } else { 181 counter = (ti->counter_value - d) & 0xffff; 182 } 183 return counter; 184 } 185 186 static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val) 187 { 188 CUDA_DPRINTF("T%d.counter=%d\n", 1 + ti->index, val); 189 ti->load_time = get_counter_load_time(s, ti); 190 ti->counter_value = val; 191 cuda_timer_update(s, ti, ti->load_time); 192 } 193 194 static int64_t get_next_irq_time(CUDATimer *ti, int64_t current_time) 195 { 196 int64_t d, next_time; 197 unsigned int counter; 198 199 /* current counter value */ 200 d = muldiv64(current_time - ti->load_time, 201 ti->frequency, NANOSECONDS_PER_SECOND); 202 /* the timer goes down from latch to -1 (period of latch + 2) */ 203 if (d <= (ti->counter_value + 1)) { 204 counter = (ti->counter_value - d) & 0xffff; 205 } else { 206 counter = (d - (ti->counter_value + 1)) % (ti->latch + 2); 207 counter = (ti->latch - counter) & 0xffff; 208 } 209 210 /* Note: we consider the irq is raised on 0 */ 211 if (counter == 0xffff) { 212 next_time = d + ti->latch + 1; 213 } else if (counter == 0) { 214 next_time = d + ti->latch + 2; 215 } else { 216 next_time = d + counter; 217 } 218 CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n", 219 ti->latch, d, next_time - d); 220 next_time = muldiv64(next_time, NANOSECONDS_PER_SECOND, ti->frequency) + 221 ti->load_time; 222 if (next_time <= current_time) { 223 next_time = current_time + 1; 224 } 225 return next_time; 226 } 227 228 static void cuda_timer_update(CUDAState *s, CUDATimer *ti, 229 int64_t current_time) 230 { 231 if (!ti->timer) 232 return; 233 if (ti->index == 0 && (s->acr & T1MODE) != T1MODE_CONT) { 234 timer_del(ti->timer); 235 } else { 236 ti->next_irq_time = get_next_irq_time(ti, current_time); 237 timer_mod(ti->timer, ti->next_irq_time); 238 } 239 } 240 241 static void cuda_timer1(void *opaque) 242 { 243 CUDAState *s = opaque; 244 CUDATimer *ti = &s->timers[0]; 245 246 cuda_timer_update(s, ti, ti->next_irq_time); 247 s->ifr |= T1_INT; 248 cuda_update_irq(s); 249 } 250 251 static void cuda_timer2(void *opaque) 252 { 253 CUDAState *s = opaque; 254 CUDATimer *ti = &s->timers[1]; 255 256 cuda_timer_update(s, ti, ti->next_irq_time); 257 s->ifr |= T2_INT; 258 cuda_update_irq(s); 259 } 260 261 static void cuda_set_sr_int(void *opaque) 262 { 263 CUDAState *s = opaque; 264 265 CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__); 266 s->ifr |= SR_INT; 267 cuda_update_irq(s); 268 } 269 270 static void cuda_delay_set_sr_int(CUDAState *s) 271 { 272 int64_t expire; 273 274 if (s->dirb == 0xff) { 275 /* Not in Mac OS, fire the IRQ directly */ 276 cuda_set_sr_int(s); 277 return; 278 } 279 280 CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__); 281 282 expire = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 300 * SCALE_US; 283 timer_mod(s->sr_delay_timer, expire); 284 } 285 286 static uint64_t cuda_read(void *opaque, hwaddr addr, unsigned size) 287 { 288 CUDAState *s = opaque; 289 uint32_t val; 290 291 addr = (addr >> 9) & 0xf; 292 switch(addr) { 293 case CUDA_REG_B: 294 val = s->b; 295 break; 296 case CUDA_REG_A: 297 val = s->a; 298 break; 299 case CUDA_REG_DIRB: 300 val = s->dirb; 301 break; 302 case CUDA_REG_DIRA: 303 val = s->dira; 304 break; 305 case CUDA_REG_T1CL: 306 val = get_counter(s, &s->timers[0]) & 0xff; 307 s->ifr &= ~T1_INT; 308 cuda_update_irq(s); 309 break; 310 case CUDA_REG_T1CH: 311 val = get_counter(s, &s->timers[0]) >> 8; 312 cuda_update_irq(s); 313 break; 314 case CUDA_REG_T1LL: 315 val = s->timers[0].latch & 0xff; 316 break; 317 case CUDA_REG_T1LH: 318 /* XXX: check this */ 319 val = (s->timers[0].latch >> 8) & 0xff; 320 break; 321 case CUDA_REG_T2CL: 322 val = get_counter(s, &s->timers[1]) & 0xff; 323 s->ifr &= ~T2_INT; 324 cuda_update_irq(s); 325 break; 326 case CUDA_REG_T2CH: 327 val = get_counter(s, &s->timers[1]) >> 8; 328 break; 329 case CUDA_REG_SR: 330 val = s->sr; 331 s->ifr &= ~(SR_INT | SR_CLOCK_INT | SR_DATA_INT); 332 cuda_update_irq(s); 333 break; 334 case CUDA_REG_ACR: 335 val = s->acr; 336 break; 337 case CUDA_REG_PCR: 338 val = s->pcr; 339 break; 340 case CUDA_REG_IFR: 341 val = s->ifr; 342 if (s->ifr & s->ier) { 343 val |= 0x80; 344 } 345 break; 346 case CUDA_REG_IER: 347 val = s->ier | 0x80; 348 break; 349 default: 350 case CUDA_REG_ANH: 351 val = s->anh; 352 break; 353 } 354 if (addr != CUDA_REG_IFR || val != 0) { 355 CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val); 356 } 357 358 return val; 359 } 360 361 static void cuda_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) 362 { 363 CUDAState *s = opaque; 364 365 addr = (addr >> 9) & 0xf; 366 CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val); 367 368 switch(addr) { 369 case CUDA_REG_B: 370 s->b = (s->b & ~s->dirb) | (val & s->dirb); 371 cuda_update(s); 372 break; 373 case CUDA_REG_A: 374 s->a = (s->a & ~s->dira) | (val & s->dira); 375 break; 376 case CUDA_REG_DIRB: 377 s->dirb = val; 378 break; 379 case CUDA_REG_DIRA: 380 s->dira = val; 381 break; 382 case CUDA_REG_T1CL: 383 s->timers[0].latch = (s->timers[0].latch & 0xff00) | val; 384 cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); 385 break; 386 case CUDA_REG_T1CH: 387 s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8); 388 s->ifr &= ~T1_INT; 389 set_counter(s, &s->timers[0], s->timers[0].latch); 390 break; 391 case CUDA_REG_T1LL: 392 s->timers[0].latch = (s->timers[0].latch & 0xff00) | val; 393 cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); 394 break; 395 case CUDA_REG_T1LH: 396 s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8); 397 s->ifr &= ~T1_INT; 398 cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); 399 break; 400 case CUDA_REG_T2CL: 401 s->timers[1].latch = (s->timers[1].latch & 0xff00) | val; 402 break; 403 case CUDA_REG_T2CH: 404 /* To ensure T2 generates an interrupt on zero crossing with the 405 common timer code, write the value directly from the latch to 406 the counter */ 407 s->timers[1].latch = (s->timers[1].latch & 0xff) | (val << 8); 408 s->ifr &= ~T2_INT; 409 set_counter(s, &s->timers[1], s->timers[1].latch); 410 break; 411 case CUDA_REG_SR: 412 s->sr = val; 413 break; 414 case CUDA_REG_ACR: 415 s->acr = val; 416 cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); 417 break; 418 case CUDA_REG_PCR: 419 s->pcr = val; 420 break; 421 case CUDA_REG_IFR: 422 /* reset bits */ 423 s->ifr &= ~val; 424 cuda_update_irq(s); 425 break; 426 case CUDA_REG_IER: 427 if (val & IER_SET) { 428 /* set bits */ 429 s->ier |= val & 0x7f; 430 } else { 431 /* reset bits */ 432 s->ier &= ~val; 433 } 434 cuda_update_irq(s); 435 break; 436 default: 437 case CUDA_REG_ANH: 438 s->anh = val; 439 break; 440 } 441 } 442 443 /* NOTE: TIP and TREQ are negated */ 444 static void cuda_update(CUDAState *s) 445 { 446 int packet_received, len; 447 448 packet_received = 0; 449 if (!(s->b & TIP)) { 450 /* transfer requested from host */ 451 452 if (s->acr & SR_OUT) { 453 /* data output */ 454 if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) { 455 if (s->data_out_index < sizeof(s->data_out)) { 456 CUDA_DPRINTF("send: %02x\n", s->sr); 457 s->data_out[s->data_out_index++] = s->sr; 458 cuda_delay_set_sr_int(s); 459 } 460 } 461 } else { 462 if (s->data_in_index < s->data_in_size) { 463 /* data input */ 464 if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) { 465 s->sr = s->data_in[s->data_in_index++]; 466 CUDA_DPRINTF("recv: %02x\n", s->sr); 467 /* indicate end of transfer */ 468 if (s->data_in_index >= s->data_in_size) { 469 s->b = (s->b | TREQ); 470 } 471 cuda_delay_set_sr_int(s); 472 } 473 } 474 } 475 } else { 476 /* no transfer requested: handle sync case */ 477 if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) { 478 /* update TREQ state each time TACK change state */ 479 if (s->b & TACK) 480 s->b = (s->b | TREQ); 481 else 482 s->b = (s->b & ~TREQ); 483 cuda_delay_set_sr_int(s); 484 } else { 485 if (!(s->last_b & TIP)) { 486 /* handle end of host to cuda transfer */ 487 packet_received = (s->data_out_index > 0); 488 /* always an IRQ at the end of transfer */ 489 cuda_delay_set_sr_int(s); 490 } 491 /* signal if there is data to read */ 492 if (s->data_in_index < s->data_in_size) { 493 s->b = (s->b & ~TREQ); 494 } 495 } 496 } 497 498 s->last_acr = s->acr; 499 s->last_b = s->b; 500 501 /* NOTE: cuda_receive_packet_from_host() can call cuda_update() 502 recursively */ 503 if (packet_received) { 504 len = s->data_out_index; 505 s->data_out_index = 0; 506 cuda_receive_packet_from_host(s, s->data_out, len); 507 } 508 } 509 510 static void cuda_send_packet_to_host(CUDAState *s, 511 const uint8_t *data, int len) 512 { 513 #ifdef DEBUG_CUDA_PACKET 514 { 515 int i; 516 printf("cuda_send_packet_to_host:\n"); 517 for(i = 0; i < len; i++) 518 printf(" %02x", data[i]); 519 printf("\n"); 520 } 521 #endif 522 memcpy(s->data_in, data, len); 523 s->data_in_size = len; 524 s->data_in_index = 0; 525 cuda_update(s); 526 cuda_delay_set_sr_int(s); 527 } 528 529 static void cuda_adb_poll(void *opaque) 530 { 531 CUDAState *s = opaque; 532 uint8_t obuf[ADB_MAX_OUT_LEN + 2]; 533 int olen; 534 535 olen = adb_poll(&s->adb_bus, obuf + 2, s->adb_poll_mask); 536 if (olen > 0) { 537 obuf[0] = ADB_PACKET; 538 obuf[1] = 0x40; /* polled data */ 539 cuda_send_packet_to_host(s, obuf, olen + 2); 540 } 541 timer_mod(s->adb_poll_timer, 542 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 543 (NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms))); 544 } 545 546 /* description of commands */ 547 typedef struct CudaCommand { 548 uint8_t command; 549 const char *name; 550 bool (*handler)(CUDAState *s, 551 const uint8_t *in_args, int in_len, 552 uint8_t *out_args, int *out_len); 553 } CudaCommand; 554 555 static bool cuda_cmd_autopoll(CUDAState *s, 556 const uint8_t *in_data, int in_len, 557 uint8_t *out_data, int *out_len) 558 { 559 int autopoll; 560 561 if (in_len != 1) { 562 return false; 563 } 564 565 autopoll = (in_data[0] != 0); 566 if (autopoll != s->autopoll) { 567 s->autopoll = autopoll; 568 if (autopoll) { 569 timer_mod(s->adb_poll_timer, 570 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 571 (NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms))); 572 } else { 573 timer_del(s->adb_poll_timer); 574 } 575 } 576 return true; 577 } 578 579 static bool cuda_cmd_set_autorate(CUDAState *s, 580 const uint8_t *in_data, int in_len, 581 uint8_t *out_data, int *out_len) 582 { 583 if (in_len != 1) { 584 return false; 585 } 586 587 /* we don't want a period of 0 ms */ 588 /* FIXME: check what real hardware does */ 589 if (in_data[0] == 0) { 590 return false; 591 } 592 593 s->autopoll_rate_ms = in_data[0]; 594 if (s->autopoll) { 595 timer_mod(s->adb_poll_timer, 596 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 597 (NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms))); 598 } 599 return true; 600 } 601 602 static bool cuda_cmd_set_device_list(CUDAState *s, 603 const uint8_t *in_data, int in_len, 604 uint8_t *out_data, int *out_len) 605 { 606 if (in_len != 2) { 607 return false; 608 } 609 610 s->adb_poll_mask = (((uint16_t)in_data[0]) << 8) | in_data[1]; 611 return true; 612 } 613 614 static bool cuda_cmd_powerdown(CUDAState *s, 615 const uint8_t *in_data, int in_len, 616 uint8_t *out_data, int *out_len) 617 { 618 if (in_len != 0) { 619 return false; 620 } 621 622 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); 623 return true; 624 } 625 626 static bool cuda_cmd_reset_system(CUDAState *s, 627 const uint8_t *in_data, int in_len, 628 uint8_t *out_data, int *out_len) 629 { 630 if (in_len != 0) { 631 return false; 632 } 633 634 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); 635 return true; 636 } 637 638 static bool cuda_cmd_set_file_server_flag(CUDAState *s, 639 const uint8_t *in_data, int in_len, 640 uint8_t *out_data, int *out_len) 641 { 642 if (in_len != 1) { 643 return false; 644 } 645 646 qemu_log_mask(LOG_UNIMP, 647 "CUDA: unimplemented command FILE_SERVER_FLAG %d\n", 648 in_data[0]); 649 return true; 650 } 651 652 static bool cuda_cmd_set_power_message(CUDAState *s, 653 const uint8_t *in_data, int in_len, 654 uint8_t *out_data, int *out_len) 655 { 656 if (in_len != 1) { 657 return false; 658 } 659 660 qemu_log_mask(LOG_UNIMP, 661 "CUDA: unimplemented command SET_POWER_MESSAGE %d\n", 662 in_data[0]); 663 return true; 664 } 665 666 static bool cuda_cmd_get_time(CUDAState *s, 667 const uint8_t *in_data, int in_len, 668 uint8_t *out_data, int *out_len) 669 { 670 uint32_t ti; 671 672 if (in_len != 0) { 673 return false; 674 } 675 676 ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) 677 / NANOSECONDS_PER_SECOND); 678 out_data[0] = ti >> 24; 679 out_data[1] = ti >> 16; 680 out_data[2] = ti >> 8; 681 out_data[3] = ti; 682 *out_len = 4; 683 return true; 684 } 685 686 static bool cuda_cmd_set_time(CUDAState *s, 687 const uint8_t *in_data, int in_len, 688 uint8_t *out_data, int *out_len) 689 { 690 uint32_t ti; 691 692 if (in_len != 4) { 693 return false; 694 } 695 696 ti = (((uint32_t)in_data[0]) << 24) + (((uint32_t)in_data[1]) << 16) 697 + (((uint32_t)in_data[2]) << 8) + in_data[3]; 698 s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) 699 / NANOSECONDS_PER_SECOND); 700 return true; 701 } 702 703 static const CudaCommand handlers[] = { 704 { CUDA_AUTOPOLL, "AUTOPOLL", cuda_cmd_autopoll }, 705 { CUDA_SET_AUTO_RATE, "SET_AUTO_RATE", cuda_cmd_set_autorate }, 706 { CUDA_SET_DEVICE_LIST, "SET_DEVICE_LIST", cuda_cmd_set_device_list }, 707 { CUDA_POWERDOWN, "POWERDOWN", cuda_cmd_powerdown }, 708 { CUDA_RESET_SYSTEM, "RESET_SYSTEM", cuda_cmd_reset_system }, 709 { CUDA_FILE_SERVER_FLAG, "FILE_SERVER_FLAG", 710 cuda_cmd_set_file_server_flag }, 711 { CUDA_SET_POWER_MESSAGES, "SET_POWER_MESSAGES", 712 cuda_cmd_set_power_message }, 713 { CUDA_GET_TIME, "GET_TIME", cuda_cmd_get_time }, 714 { CUDA_SET_TIME, "SET_TIME", cuda_cmd_set_time }, 715 }; 716 717 static void cuda_receive_packet(CUDAState *s, 718 const uint8_t *data, int len) 719 { 720 uint8_t obuf[16] = { CUDA_PACKET, 0, data[0] }; 721 int i, out_len = 0; 722 723 for (i = 0; i < ARRAY_SIZE(handlers); i++) { 724 const CudaCommand *desc = &handlers[i]; 725 if (desc->command == data[0]) { 726 CUDA_DPRINTF("handling command %s\n", desc->name); 727 out_len = 0; 728 if (desc->handler(s, data + 1, len - 1, obuf + 3, &out_len)) { 729 cuda_send_packet_to_host(s, obuf, 3 + out_len); 730 } else { 731 qemu_log_mask(LOG_GUEST_ERROR, 732 "CUDA: %s: wrong parameters %d\n", 733 desc->name, len); 734 obuf[0] = ERROR_PACKET; 735 obuf[1] = 0x5; /* bad parameters */ 736 obuf[2] = CUDA_PACKET; 737 obuf[3] = data[0]; 738 cuda_send_packet_to_host(s, obuf, 4); 739 } 740 return; 741 } 742 } 743 744 qemu_log_mask(LOG_GUEST_ERROR, "CUDA: unknown command 0x%02x\n", data[0]); 745 obuf[0] = ERROR_PACKET; 746 obuf[1] = 0x2; /* unknown command */ 747 obuf[2] = CUDA_PACKET; 748 obuf[3] = data[0]; 749 cuda_send_packet_to_host(s, obuf, 4); 750 } 751 752 static void cuda_receive_packet_from_host(CUDAState *s, 753 const uint8_t *data, int len) 754 { 755 #ifdef DEBUG_CUDA_PACKET 756 { 757 int i; 758 printf("cuda_receive_packet_from_host:\n"); 759 for(i = 0; i < len; i++) 760 printf(" %02x", data[i]); 761 printf("\n"); 762 } 763 #endif 764 switch(data[0]) { 765 case ADB_PACKET: 766 { 767 uint8_t obuf[ADB_MAX_OUT_LEN + 3]; 768 int olen; 769 olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1); 770 if (olen > 0) { 771 obuf[0] = ADB_PACKET; 772 obuf[1] = 0x00; 773 cuda_send_packet_to_host(s, obuf, olen + 2); 774 } else { 775 /* error */ 776 obuf[0] = ADB_PACKET; 777 obuf[1] = -olen; 778 obuf[2] = data[1]; 779 olen = 0; 780 cuda_send_packet_to_host(s, obuf, olen + 3); 781 } 782 } 783 break; 784 case CUDA_PACKET: 785 cuda_receive_packet(s, data + 1, len - 1); 786 break; 787 } 788 } 789 790 static const MemoryRegionOps cuda_ops = { 791 .read = cuda_read, 792 .write = cuda_write, 793 .endianness = DEVICE_BIG_ENDIAN, 794 .valid = { 795 .min_access_size = 1, 796 .max_access_size = 1, 797 }, 798 }; 799 800 static bool cuda_timer_exist(void *opaque, int version_id) 801 { 802 CUDATimer *s = opaque; 803 804 return s->timer != NULL; 805 } 806 807 static const VMStateDescription vmstate_cuda_timer = { 808 .name = "cuda_timer", 809 .version_id = 0, 810 .minimum_version_id = 0, 811 .fields = (VMStateField[]) { 812 VMSTATE_UINT16(latch, CUDATimer), 813 VMSTATE_UINT16(counter_value, CUDATimer), 814 VMSTATE_INT64(load_time, CUDATimer), 815 VMSTATE_INT64(next_irq_time, CUDATimer), 816 VMSTATE_TIMER_PTR_TEST(timer, CUDATimer, cuda_timer_exist), 817 VMSTATE_END_OF_LIST() 818 } 819 }; 820 821 static const VMStateDescription vmstate_cuda = { 822 .name = "cuda", 823 .version_id = 4, 824 .minimum_version_id = 4, 825 .fields = (VMStateField[]) { 826 VMSTATE_UINT8(a, CUDAState), 827 VMSTATE_UINT8(b, CUDAState), 828 VMSTATE_UINT8(last_b, CUDAState), 829 VMSTATE_UINT8(dira, CUDAState), 830 VMSTATE_UINT8(dirb, CUDAState), 831 VMSTATE_UINT8(sr, CUDAState), 832 VMSTATE_UINT8(acr, CUDAState), 833 VMSTATE_UINT8(last_acr, CUDAState), 834 VMSTATE_UINT8(pcr, CUDAState), 835 VMSTATE_UINT8(ifr, CUDAState), 836 VMSTATE_UINT8(ier, CUDAState), 837 VMSTATE_UINT8(anh, CUDAState), 838 VMSTATE_INT32(data_in_size, CUDAState), 839 VMSTATE_INT32(data_in_index, CUDAState), 840 VMSTATE_INT32(data_out_index, CUDAState), 841 VMSTATE_UINT8(autopoll, CUDAState), 842 VMSTATE_UINT8(autopoll_rate_ms, CUDAState), 843 VMSTATE_UINT16(adb_poll_mask, CUDAState), 844 VMSTATE_BUFFER(data_in, CUDAState), 845 VMSTATE_BUFFER(data_out, CUDAState), 846 VMSTATE_UINT32(tick_offset, CUDAState), 847 VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1, 848 vmstate_cuda_timer, CUDATimer), 849 VMSTATE_TIMER_PTR(adb_poll_timer, CUDAState), 850 VMSTATE_TIMER_PTR(sr_delay_timer, CUDAState), 851 VMSTATE_END_OF_LIST() 852 } 853 }; 854 855 static void cuda_reset(DeviceState *dev) 856 { 857 CUDAState *s = CUDA(dev); 858 859 s->b = 0; 860 s->a = 0; 861 s->dirb = 0xff; 862 s->dira = 0; 863 s->sr = 0; 864 s->acr = 0; 865 s->pcr = 0; 866 s->ifr = 0; 867 s->ier = 0; 868 // s->ier = T1_INT | SR_INT; 869 s->anh = 0; 870 s->data_in_size = 0; 871 s->data_in_index = 0; 872 s->data_out_index = 0; 873 s->autopoll = 0; 874 875 s->timers[0].latch = 0xffff; 876 set_counter(s, &s->timers[0], 0xffff); 877 878 s->timers[1].latch = 0xffff; 879 880 s->sr_delay_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_set_sr_int, s); 881 } 882 883 static void cuda_realizefn(DeviceState *dev, Error **errp) 884 { 885 CUDAState *s = CUDA(dev); 886 struct tm tm; 887 888 s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s); 889 s->timers[0].frequency = CUDA_TIMER_FREQ; 890 s->timers[1].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer2, s); 891 s->timers[1].frequency = (SCALE_US * 6000) / 4700; 892 893 qemu_get_timedate(&tm, 0); 894 s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET; 895 896 s->adb_poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_adb_poll, s); 897 s->autopoll_rate_ms = 20; 898 s->adb_poll_mask = 0xffff; 899 } 900 901 static void cuda_initfn(Object *obj) 902 { 903 SysBusDevice *d = SYS_BUS_DEVICE(obj); 904 CUDAState *s = CUDA(obj); 905 int i; 906 907 memory_region_init_io(&s->mem, obj, &cuda_ops, s, "cuda", 0x2000); 908 sysbus_init_mmio(d, &s->mem); 909 sysbus_init_irq(d, &s->irq); 910 911 for (i = 0; i < ARRAY_SIZE(s->timers); i++) { 912 s->timers[i].index = i; 913 } 914 915 qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, 916 DEVICE(obj), "adb.0"); 917 } 918 919 static Property cuda_properties[] = { 920 DEFINE_PROP_UINT64("timebase-frequency", CUDAState, tb_frequency, 0), 921 DEFINE_PROP_END_OF_LIST() 922 }; 923 924 static void cuda_class_init(ObjectClass *oc, void *data) 925 { 926 DeviceClass *dc = DEVICE_CLASS(oc); 927 928 dc->realize = cuda_realizefn; 929 dc->reset = cuda_reset; 930 dc->vmsd = &vmstate_cuda; 931 dc->props = cuda_properties; 932 set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); 933 } 934 935 static const TypeInfo cuda_type_info = { 936 .name = TYPE_CUDA, 937 .parent = TYPE_SYS_BUS_DEVICE, 938 .instance_size = sizeof(CUDAState), 939 .instance_init = cuda_initfn, 940 .class_init = cuda_class_init, 941 }; 942 943 static void cuda_register_types(void) 944 { 945 type_register_static(&cuda_type_info); 946 } 947 948 type_init(cuda_register_types) 949