// Copyright 2024, Linaro Limited
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
// SPDX-License-Identifier: GPL-2.0-or-later

use core::ptr::{addr_of, addr_of_mut, NonNull};
use std::{
    ffi::CStr,
    os::raw::{c_int, c_void},
};

use qemu_api::{
    bindings::{
        error_fatal, hwaddr, memory_region_init_io, qdev_prop_set_chr, qemu_chr_fe_accept_input,
        qemu_chr_fe_ioctl, qemu_chr_fe_set_handlers, qemu_chr_fe_write_all, qemu_irq,
        sysbus_connect_irq, sysbus_mmio_map, sysbus_realize, CharBackend, Chardev, MemoryRegion,
        QEMUChrEvent, CHR_IOCTL_SERIAL_SET_BREAK,
    },
    c_str, impl_vmstate_forward,
    irq::InterruptSource,
    prelude::*,
    qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl},
    qom::{ClassInitImpl, ObjectImpl, Owned, ParentField},
    sysbus::{SysBusDevice, SysBusDeviceClass},
    vmstate::VMStateDescription,
};

use crate::{
    device_class,
    memory_ops::PL011_OPS,
    registers::{self, Interrupt},
    RegisterOffset,
};

/// Integer Baud Rate Divider, `UARTIBRD`
const IBRD_MASK: u32 = 0xffff;

/// Fractional Baud Rate Divider, `UARTFBRD`
const FBRD_MASK: u32 = 0x3f;

/// QEMU sourced constant.
pub const PL011_FIFO_DEPTH: u32 = 16;

#[derive(Clone, Copy)]
struct DeviceId(&'static [u8; 8]);

impl std::ops::Index<hwaddr> for DeviceId {
    type Output = u8;

    fn index(&self, idx: hwaddr) -> &Self::Output {
        &self.0[idx as usize]
    }
}

impl DeviceId {
    const ARM: Self = Self(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
    const LUMINARY: Self = Self(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
}

// FIFOs use 32-bit indices instead of usize, for compatibility with
// the migration stream produced by the C version of this device.
#[repr(transparent)]
#[derive(Debug, Default)]
pub struct Fifo([registers::Data; PL011_FIFO_DEPTH as usize]);
impl_vmstate_forward!(Fifo);

impl Fifo {
    const fn len(&self) -> u32 {
        self.0.len() as u32
    }
}

impl std::ops::IndexMut<u32> for Fifo {
    fn index_mut(&mut self, idx: u32) -> &mut Self::Output {
        &mut self.0[idx as usize]
    }
}

impl std::ops::Index<u32> for Fifo {
    type Output = registers::Data;

    fn index(&self, idx: u32) -> &Self::Output {
        &self.0[idx as usize]
    }
}

#[repr(C)]
#[derive(Debug, Default, qemu_api_macros::offsets)]
pub struct PL011Registers {
    #[doc(alias = "fr")]
    pub flags: registers::Flags,
    #[doc(alias = "lcr")]
    pub line_control: registers::LineControl,
    #[doc(alias = "rsr")]
    pub receive_status_error_clear: registers::ReceiveStatusErrorClear,
    #[doc(alias = "cr")]
    pub control: registers::Control,
    pub dmacr: u32,
    pub int_enabled: u32,
    pub int_level: u32,
    pub read_fifo: Fifo,
    pub ilpr: u32,
    pub ibrd: u32,
    pub fbrd: u32,
    pub ifl: u32,
    pub read_pos: u32,
    pub read_count: u32,
    pub read_trigger: u32,
}

#[repr(C)]
#[derive(qemu_api_macros::Object, qemu_api_macros::offsets)]
/// PL011 Device Model in QEMU
pub struct PL011State {
    pub parent_obj: ParentField<SysBusDevice>,
    pub iomem: MemoryRegion,
    #[doc(alias = "chr")]
    pub char_backend: CharBackend,
    pub regs: BqlRefCell<PL011Registers>,
    /// QEMU interrupts
    ///
    /// ```text
    ///  * sysbus MMIO region 0: device registers
    ///  * sysbus IRQ 0: `UARTINTR` (combined interrupt line)
    ///  * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line)
    ///  * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line)
    ///  * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line)
    ///  * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line)
    ///  * sysbus IRQ 5: `UARTEINTR` (error interrupt line)
    /// ```
    #[doc(alias = "irq")]
    pub interrupts: [InterruptSource; IRQMASK.len()],
    #[doc(alias = "clk")]
    pub clock: Owned<Clock>,
    #[doc(alias = "migrate_clk")]
    pub migrate_clock: bool,
}

qom_isa!(PL011State : SysBusDevice, DeviceState, Object);

#[repr(C)]
pub struct PL011Class {
    parent_class: <SysBusDevice as ObjectType>::Class,
    /// The byte string that identifies the device.
    device_id: DeviceId,
}

unsafe impl ObjectType for PL011State {
    type Class = PL011Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
}

impl ClassInitImpl<PL011Class> for PL011State {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::ARM;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

impl ObjectImpl for PL011State {
    type ParentType = SysBusDevice;

    const INSTANCE_INIT: Option<unsafe fn(&mut Self)> = Some(Self::init);
    const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
}

impl DeviceImpl for PL011State {
    fn properties() -> &'static [Property] {
        &device_class::PL011_PROPERTIES
    }
    fn vmsd() -> Option<&'static VMStateDescription> {
        Some(&device_class::VMSTATE_PL011)
    }
    const REALIZE: Option<fn(&Self)> = Some(Self::realize);
}

impl ResettablePhasesImpl for PL011State {
    const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
}

impl PL011Registers {
    pub(self) fn read(&mut self, offset: RegisterOffset) -> (bool, u32) {
        use RegisterOffset::*;

        let mut update = false;
        let result = match offset {
            DR => {
                self.flags.set_receive_fifo_full(false);
                let c = self.read_fifo[self.read_pos];
                if self.read_count > 0 {
                    self.read_count -= 1;
                    self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1);
                }
                if self.read_count == 0 {
                    self.flags.set_receive_fifo_empty(true);
                }
                if self.read_count + 1 == self.read_trigger {
                    self.int_level &= !Interrupt::RX.0;
                }
                // Update error bits.
                self.receive_status_error_clear.set_from_data(c);
                // Must call qemu_chr_fe_accept_input
                update = true;
                u32::from(c)
            }
            RSR => u32::from(self.receive_status_error_clear),
            FR => u32::from(self.flags),
            FBRD => self.fbrd,
            ILPR => self.ilpr,
            IBRD => self.ibrd,
            LCR_H => u32::from(self.line_control),
            CR => u32::from(self.control),
            FLS => self.ifl,
            IMSC => self.int_enabled,
            RIS => self.int_level,
            MIS => self.int_level & self.int_enabled,
            ICR => {
                // "The UARTICR Register is the interrupt clear register and is write-only"
                // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR
                0
            }
            DMACR => self.dmacr,
        };
        (update, result)
    }

    pub(self) fn write(
        &mut self,
        offset: RegisterOffset,
        value: u32,
        char_backend: *mut CharBackend,
    ) -> bool {
        // eprintln!("write offset {offset} value {value}");
        use RegisterOffset::*;
        match offset {
            DR => {
                // interrupts always checked
                let _ = self.loopback_tx(value);
                self.int_level |= Interrupt::TX.0;
                return true;
            }
            RSR => {
                self.receive_status_error_clear = 0.into();
            }
            FR => {
                // flag writes are ignored
            }
            ILPR => {
                self.ilpr = value;
            }
            IBRD => {
                self.ibrd = value;
            }
            FBRD => {
                self.fbrd = value;
            }
            LCR_H => {
                let new_val: registers::LineControl = value.into();
                // Reset the FIFO state on FIFO enable or disable
                if self.line_control.fifos_enabled() != new_val.fifos_enabled() {
                    self.reset_rx_fifo();
                    self.reset_tx_fifo();
                }
                let update = (self.line_control.send_break() != new_val.send_break()) && {
                    let mut break_enable: c_int = new_val.send_break().into();
                    // SAFETY: self.char_backend is a valid CharBackend instance after it has
                    // been initialized in realize().
                    unsafe {
                        qemu_chr_fe_ioctl(
                            char_backend,
                            CHR_IOCTL_SERIAL_SET_BREAK as i32,
                            addr_of_mut!(break_enable).cast::<c_void>(),
                        );
                    }
                    self.loopback_break(break_enable > 0)
                };
                self.line_control = new_val;
                self.set_read_trigger();
                return update;
            }
            CR => {
                // ??? Need to implement the enable bit.
                self.control = value.into();
                return self.loopback_mdmctrl();
            }
            FLS => {
                self.ifl = value;
                self.set_read_trigger();
            }
            IMSC => {
                self.int_enabled = value;
                return true;
            }
            RIS => {}
            MIS => {}
            ICR => {
                self.int_level &= !value;
                return true;
            }
            DMACR => {
                self.dmacr = value;
                if value & 3 > 0 {
                    // qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n");
                    eprintln!("pl011: DMA not implemented");
                }
            }
        }
        false
    }

    #[inline]
    #[must_use]
    fn loopback_tx(&mut self, value: u32) -> bool {
        // Caveat:
        //
        // In real hardware, TX loopback happens at the serial-bit level and is
        // then reassembled by the RX logic back into bytes and placed into the
        // RX FIFO. That is, loopback happens after the TX FIFO.
        //
        // Because the real hardware TX FIFO is time-drained at the frame rate
        // governed by the configured serial format, some loopback bytes in the
        // TX FIFO may still be able to get into the RX FIFO, which could be
        // full at times while being drained at software pace.
        //
        // In such a scenario, the RX draining pace is the major factor
        // deciding which loopback bytes get into the RX FIFO, unless hardware
        // flow control is enabled.
        //
        // For simplicity, the behaviour described above is not emulated.
        self.loopback_enabled() && self.put_fifo(value)
    }

    #[must_use]
    fn loopback_mdmctrl(&mut self) -> bool {
        if !self.loopback_enabled() {
            return false;
        }

        /*
         * Loopback software-driven modem control outputs to modem status inputs:
         *   FR.RI  <= CR.Out2
         *   FR.DCD <= CR.Out1
         *   FR.CTS <= CR.RTS
         *   FR.DSR <= CR.DTR
         *
         * The loopback happens immediately even if this call is triggered
         * by setting only CR.LBE.
         *
         * CTS/RTS updates due to enabled hardware flow controls are not
         * dealt with here.
         */

        self.flags.set_ring_indicator(self.control.out_2());
        self.flags.set_data_carrier_detect(self.control.out_1());
        self.flags.set_clear_to_send(self.control.request_to_send());
        self.flags
            .set_data_set_ready(self.control.data_transmit_ready());

        // Change interrupts based on updated FR
        let mut il = self.int_level;

        il &= !Interrupt::MS.0;

        if self.flags.data_set_ready() {
            il |= Interrupt::DSR.0;
        }
        if self.flags.data_carrier_detect() {
            il |= Interrupt::DCD.0;
        }
        if self.flags.clear_to_send() {
            il |= Interrupt::CTS.0;
        }
        if self.flags.ring_indicator() {
            il |= Interrupt::RI.0;
        }
        self.int_level = il;
        true
    }

    fn loopback_break(&mut self, enable: bool) -> bool {
        enable && self.loopback_tx(registers::Data::BREAK.into())
    }

    fn set_read_trigger(&mut self) {
        self.read_trigger = 1;
    }

    pub fn reset(&mut self) {
        self.line_control.reset();
        self.receive_status_error_clear.reset();
        self.dmacr = 0;
        self.int_enabled = 0;
        self.int_level = 0;
        self.ilpr = 0;
        self.ibrd = 0;
        self.fbrd = 0;
        self.read_trigger = 1;
        self.ifl = 0x12;
        self.control.reset();
        self.flags.reset();
        self.reset_rx_fifo();
        self.reset_tx_fifo();
    }

    pub fn reset_rx_fifo(&mut self) {
        self.read_count = 0;
        self.read_pos = 0;

        // Reset FIFO flags
        self.flags.set_receive_fifo_full(false);
        self.flags.set_receive_fifo_empty(true);
    }

    pub fn reset_tx_fifo(&mut self) {
        // Reset FIFO flags
        self.flags.set_transmit_fifo_full(false);
        self.flags.set_transmit_fifo_empty(true);
    }

    #[inline]
    pub fn fifo_enabled(&self) -> bool {
        self.line_control.fifos_enabled() == registers::Mode::FIFO
    }

    #[inline]
    pub fn loopback_enabled(&self) -> bool {
        self.control.enable_loopback()
    }

    #[inline]
    pub fn fifo_depth(&self) -> u32 {
        // Note: FIFO depth is expected to be a power of 2.
        if self.fifo_enabled() {
            return PL011_FIFO_DEPTH;
        }
        1
    }

    #[must_use]
    pub fn put_fifo(&mut self, value: u32) -> bool {
        let depth = self.fifo_depth();
        assert!(depth > 0);
        let slot = (self.read_pos + self.read_count) & (depth - 1);
        self.read_fifo[slot] = registers::Data::from(value);
        self.read_count += 1;
        self.flags.set_receive_fifo_empty(false);
        if self.read_count == depth {
            self.flags.set_receive_fifo_full(true);
        }

        if self.read_count == self.read_trigger {
            self.int_level |= Interrupt::RX.0;
            return true;
        }
        false
    }

    pub fn post_load(&mut self) -> Result<(), ()> {
        /* Sanity-check input state */
        if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() {
            return Err(());
        }

        if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 {
            // Older versions of PL011 didn't ensure that the single
            // character in the FIFO in FIFO-disabled mode is in
            // element 0 of the array; convert to follow the current
            // code's assumptions.
            self.read_fifo[0] = self.read_fifo[self.read_pos];
            self.read_pos = 0;
        }

        self.ibrd &= IBRD_MASK;
        self.fbrd &= FBRD_MASK;

        Ok(())
    }
}

impl PL011State {
    /// Initializes a pre-allocated, uninitialized instance of `PL011State`.
    ///
    /// # Safety
    ///
    /// `self` must point to a correctly sized and aligned location for the
    /// `PL011State` type.
    /// It must not be called more than once on the same location/instance.
    /// All its fields are expected to hold uninitialized values with the
    /// sole exception of `parent_obj`.
    unsafe fn init(&mut self) {
        // SAFETY:
        //
        // self and self.iomem are guaranteed to be valid at this point since
        // callers must make sure the `self` reference is valid.
        unsafe {
            memory_region_init_io(
                addr_of_mut!(self.iomem),
                addr_of_mut!(*self).cast::<Object>(),
                &PL011_OPS,
                addr_of_mut!(*self).cast::<c_void>(),
                Self::TYPE_NAME.as_ptr(),
                0x1000,
            );
        }

        self.regs = Default::default();

        // SAFETY:
        //
        // self.clock is not initialized at this point; but since `Owned<_>` is
        // not Drop, we can overwrite the undefined value without side effects.
        // This is not sound, but because all PL011State instances are created
        // by QOM code, which calls this function to initialize the fields, at
        // least no code is able to access an invalid self.clock value.
        self.clock = self.init_clock_in("clk", &Self::clock_update, ClockEvent::ClockUpdate);
    }

    const fn clock_update(&self, _event: ClockEvent) {
        /* pl011_trace_baudrate_change(s); */
    }

    fn post_init(&self) {
        self.init_mmio(&self.iomem);
        for irq in self.interrupts.iter() {
            self.init_irq(irq);
        }
    }

    pub fn read(&mut self, offset: hwaddr, _size: u32) -> u64 {
        match RegisterOffset::try_from(offset) {
            Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => {
                let device_id = self.get_class().device_id;
                u64::from(device_id[(offset - 0xfe0) >> 2])
            }
            Err(_) => {
                // qemu_log_mask(LOG_GUEST_ERROR, "pl011_read: Bad offset 0x%x\n", (int)offset);
                0
            }
            Ok(field) => {
                let (update_irq, result) = self.regs.borrow_mut().read(field);
                if update_irq {
                    self.update();
                    unsafe {
                        qemu_chr_fe_accept_input(&mut self.char_backend);
                    }
                }
                result.into()
            }
        }
    }

    pub fn write(&mut self, offset: hwaddr, value: u64) {
        let mut update_irq = false;
        if let Ok(field) = RegisterOffset::try_from(offset) {
            // qemu_chr_fe_write_all() calls into the can_receive
            // callback, so handle writes before entering PL011Registers.
            if field == RegisterOffset::DR {
                // ??? Check if transmitter is enabled.
                let ch: u8 = value as u8;
                // SAFETY: char_backend is a valid CharBackend instance after it has been
                // initialized in realize().
                // XXX this blocks the entire thread. Rewrite to use
                // qemu_chr_fe_write and background I/O callbacks
                unsafe {
                    qemu_chr_fe_write_all(&mut self.char_backend, &ch, 1);
                }
            }

            update_irq = self
                .regs
                .borrow_mut()
                .write(field, value as u32, &mut self.char_backend);
        } else {
            eprintln!("write bad offset {offset} value {value}");
        }
        if update_irq {
            self.update();
        }
    }

    pub fn can_receive(&self) -> bool {
        // trace_pl011_can_receive(s->lcr, s->read_count, r);
        let regs = self.regs.borrow();
        regs.read_count < regs.fifo_depth()
    }

    pub fn receive(&self, ch: u32) {
        let mut regs = self.regs.borrow_mut();
        let update_irq = !regs.loopback_enabled() && regs.put_fifo(ch);
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update();
        }
    }

    pub fn event(&self, event: QEMUChrEvent) {
        let mut update_irq = false;
        let mut regs = self.regs.borrow_mut();
        if event == QEMUChrEvent::CHR_EVENT_BREAK && !regs.loopback_enabled() {
            update_irq = regs.put_fifo(registers::Data::BREAK.into());
        }
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update()
        }
    }

    pub fn realize(&self) {
        // SAFETY: self.char_backend has the correct size and alignment for a
        // CharBackend object, and its callbacks are of the correct types.
        unsafe {
            qemu_chr_fe_set_handlers(
                addr_of!(self.char_backend) as *mut CharBackend,
                Some(pl011_can_receive),
                Some(pl011_receive),
                Some(pl011_event),
                None,
                addr_of!(*self).cast::<c_void>() as *mut c_void,
                core::ptr::null_mut(),
                true,
            );
        }
    }

    pub fn reset_hold(&self, _type: ResetType) {
        self.regs.borrow_mut().reset();
    }

    pub fn update(&self) {
        let regs = self.regs.borrow();
        let flags = regs.int_level & regs.int_enabled;
        for (irq, i) in self.interrupts.iter().zip(IRQMASK) {
            irq.set(flags & i != 0);
        }
    }

    pub fn post_load(&self, _version_id: u32) -> Result<(), ()> {
        self.regs.borrow_mut().post_load()
    }
}

/// Which bits in the interrupt status matter for each outbound IRQ line?
const IRQMASK: [u32; 6] = [
    /* combined IRQ */
    Interrupt::E.0 | Interrupt::MS.0 | Interrupt::RT.0 | Interrupt::TX.0 | Interrupt::RX.0,
    Interrupt::RX.0,
    Interrupt::TX.0,
    Interrupt::RT.0,
    Interrupt::MS.0,
    Interrupt::E.0,
];

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_can_receive(opaque: *mut c_void) -> c_int {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().can_receive().into() }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
///
/// The buffer and size arguments must also be valid.
pub unsafe extern "C" fn pl011_receive(opaque: *mut c_void, buf: *const u8, size: c_int) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe {
        if size > 0 {
            debug_assert!(!buf.is_null());
            state.as_ref().receive(u32::from(buf.read_volatile()));
        }
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_event(opaque: *mut c_void, event: QEMUChrEvent) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().event(event) }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer for `chr`.
#[no_mangle]
pub unsafe extern "C" fn pl011_create(
    addr: u64,
    irq: qemu_irq,
    chr: *mut Chardev,
) -> *mut DeviceState {
    let pl011 = PL011State::new();
    unsafe {
        let dev = pl011.as_mut_ptr::<DeviceState>();
        qdev_prop_set_chr(dev, c_str!("chardev").as_ptr(), chr);

        let sysbus = pl011.as_mut_ptr::<SysBusDevice>();
        sysbus_realize(sysbus, addr_of_mut!(error_fatal));
        sysbus_mmio_map(sysbus, 0, addr);
        sysbus_connect_irq(sysbus, 0, irq);

        // Return the pointer, which is kept alive by the QOM tree; drop the owned ref.
        pl011.as_mut_ptr()
    }
}

#[repr(C)]
#[derive(qemu_api_macros::Object)]
/// PL011 Luminary device model.
pub struct PL011Luminary {
    parent_obj: ParentField<PL011State>,
}

impl ClassInitImpl<PL011Class> for PL011Luminary {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::LUMINARY;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object);

unsafe impl ObjectType for PL011Luminary {
    type Class = <PL011State as ObjectType>::Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY;
}

impl ObjectImpl for PL011Luminary {
    type ParentType = PL011State;
}

impl DeviceImpl for PL011Luminary {}
impl ResettablePhasesImpl for PL011Luminary {}
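
// The module below is an editorial sketch, not part of the original QEMU
// sources: a minimal, hypothetical unit test for the pure register-level
// FIFO logic of `PL011Registers` (reset, put_fifo, DR read), exercised
// without any QOM, chardev or sysbus plumbing. It assumes the crate's test
// harness can build against the qemu_api bindings, and that the `registers`
// bit-field types reset to the values used by `reset()` above (in
// particular LCR_H.FEN clear, i.e. FIFOs disabled); adjust or drop as
// needed.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn single_char_roundtrip_with_fifos_disabled() {
        let mut regs = PL011Registers::default();
        regs.reset();

        // With FIFOs disabled after reset, the effective depth is 1.
        assert_eq!(regs.fifo_depth(), 1);

        // One queued character fills the single-slot "FIFO" and raises RX.
        assert!(regs.put_fifo(0x41));
        assert_ne!(regs.int_level & Interrupt::RX.0, 0);

        // Reading DR drains it, clears RX and requests more input.
        let (accept_input, value) = regs.read(RegisterOffset::DR);
        assert!(accept_input);
        assert_eq!(value, 0x41);
        assert_eq!(regs.read_count, 0);
        assert_eq!(regs.int_level & Interrupt::RX.0, 0);
    }
}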