// Copyright 2024, Linaro Limited
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
// SPDX-License-Identifier: GPL-2.0-or-later

use core::ptr::{addr_of, addr_of_mut, NonNull};
use std::{
    ffi::CStr,
    os::raw::{c_int, c_void},
};

use qemu_api::{
    bindings::{
        qemu_chr_fe_accept_input, qemu_chr_fe_ioctl, qemu_chr_fe_set_handlers,
        qemu_chr_fe_write_all, CharBackend, QEMUChrEvent, CHR_IOCTL_SERIAL_SET_BREAK,
    },
    chardev::Chardev,
    impl_vmstate_forward,
    irq::{IRQState, InterruptSource},
    memory::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder},
    prelude::*,
    qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl},
    qom::{ClassInitImpl, ObjectImpl, Owned, ParentField},
    sysbus::{SysBusDevice, SysBusDeviceClass},
    vmstate::VMStateDescription,
};

use crate::{
    device_class,
    registers::{self, Interrupt},
    RegisterOffset,
};

/// Integer Baud Rate Divider, `UARTIBRD`
const IBRD_MASK: u32 = 0xffff;

/// Fractional Baud Rate Divider, `UARTFBRD`
const FBRD_MASK: u32 = 0x3f;

/// QEMU sourced constant.
pub const PL011_FIFO_DEPTH: u32 = 16;

#[derive(Clone, Copy)]
struct DeviceId(&'static [u8; 8]);

impl std::ops::Index<hwaddr> for DeviceId {
    type Output = u8;

    fn index(&self, idx: hwaddr) -> &Self::Output {
        &self.0[idx as usize]
    }
}

impl DeviceId {
    const ARM: Self = Self(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
    const LUMINARY: Self = Self(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
}

// FIFOs use 32-bit indices instead of usize, for compatibility with
// the migration stream produced by the C version of this device.
#[repr(transparent)]
#[derive(Debug, Default)]
pub struct Fifo([registers::Data; PL011_FIFO_DEPTH as usize]);
impl_vmstate_forward!(Fifo);

impl Fifo {
    const fn len(&self) -> u32 {
        self.0.len() as u32
    }
}

impl std::ops::IndexMut<u32> for Fifo {
    fn index_mut(&mut self, idx: u32) -> &mut Self::Output {
        &mut self.0[idx as usize]
    }
}

impl std::ops::Index<u32> for Fifo {
    type Output = registers::Data;

    fn index(&self, idx: u32) -> &Self::Output {
        &self.0[idx as usize]
    }
}
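
// Compile-time sanity check (an addition, not present in the C model): the
// receive-FIFO index arithmetic below masks positions with `fifo_depth() - 1`,
// which is only correct while the depth is a power of two.
const _: () = assert!(PL011_FIFO_DEPTH.is_power_of_two());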

#[repr(C)]
#[derive(Debug, Default, qemu_api_macros::offsets)]
pub struct PL011Registers {
    #[doc(alias = "fr")]
    pub flags: registers::Flags,
    #[doc(alias = "lcr")]
    pub line_control: registers::LineControl,
    #[doc(alias = "rsr")]
    pub receive_status_error_clear: registers::ReceiveStatusErrorClear,
    #[doc(alias = "cr")]
    pub control: registers::Control,
    pub dmacr: u32,
    pub int_enabled: u32,
    pub int_level: u32,
    pub read_fifo: Fifo,
    pub ilpr: u32,
    pub ibrd: u32,
    pub fbrd: u32,
    pub ifl: u32,
    pub read_pos: u32,
    pub read_count: u32,
    pub read_trigger: u32,
}

#[repr(C)]
#[derive(qemu_api_macros::Object, qemu_api_macros::offsets)]
/// PL011 Device Model in QEMU
pub struct PL011State {
    pub parent_obj: ParentField<SysBusDevice>,
    pub iomem: MemoryRegion,
    #[doc(alias = "chr")]
    pub char_backend: CharBackend,
    pub regs: BqlRefCell<PL011Registers>,
    /// QEMU interrupts
    ///
    /// ```text
    /// * sysbus MMIO region 0: device registers
    /// * sysbus IRQ 0: `UARTINTR` (combined interrupt line)
    /// * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line)
    /// * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line)
    /// * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line)
    /// * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line)
    /// * sysbus IRQ 5: `UARTEINTR` (error interrupt line)
    /// ```
    #[doc(alias = "irq")]
    pub interrupts: [InterruptSource; IRQMASK.len()],
    #[doc(alias = "clk")]
    pub clock: Owned<Clock>,
    #[doc(alias = "migrate_clk")]
    pub migrate_clock: bool,
}

qom_isa!(PL011State : SysBusDevice, DeviceState, Object);

#[repr(C)]
pub struct PL011Class {
    parent_class: <SysBusDevice as ObjectType>::Class,
    /// The byte string that identifies the device.
    device_id: DeviceId,
}

unsafe impl ObjectType for PL011State {
    type Class = PL011Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
}

impl ClassInitImpl<PL011Class> for PL011State {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::ARM;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

impl ObjectImpl for PL011State {
    type ParentType = SysBusDevice;

    const INSTANCE_INIT: Option<unsafe fn(&mut Self)> = Some(Self::init);
    const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
}

impl DeviceImpl for PL011State {
    fn properties() -> &'static [Property] {
        &device_class::PL011_PROPERTIES
    }
    fn vmsd() -> Option<&'static VMStateDescription> {
        Some(&device_class::VMSTATE_PL011)
    }
    const REALIZE: Option<fn(&Self)> = Some(Self::realize);
}

impl ResettablePhasesImpl for PL011State {
    const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
}

impl PL011Registers {
    pub(self) fn read(&mut self, offset: RegisterOffset) -> (bool, u32) {
        use RegisterOffset::*;

        let mut update = false;
        let result = match offset {
            DR => {
                self.flags.set_receive_fifo_full(false);
                let c = self.read_fifo[self.read_pos];
                if self.read_count > 0 {
                    self.read_count -= 1;
                    self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1);
                }
                if self.read_count == 0 {
                    self.flags.set_receive_fifo_empty(true);
                }
                if self.read_count + 1 == self.read_trigger {
                    self.int_level &= !Interrupt::RX.0;
                }
                // Update error bits.
                self.receive_status_error_clear.set_from_data(c);
                // Must call qemu_chr_fe_accept_input, so ask the caller to do it.
                update = true;
                u32::from(c)
            }
            RSR => u32::from(self.receive_status_error_clear),
            FR => u32::from(self.flags),
            FBRD => self.fbrd,
            ILPR => self.ilpr,
            IBRD => self.ibrd,
            LCR_H => u32::from(self.line_control),
            CR => u32::from(self.control),
            FLS => self.ifl,
            IMSC => self.int_enabled,
            RIS => self.int_level,
            MIS => self.int_level & self.int_enabled,
            ICR => {
                // "The UARTICR Register is the interrupt clear register and is write-only"
                // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR
                0
            }
            DMACR => self.dmacr,
        };
        (update, result)
    }
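
    /// Handle a guest write of `value` to the register at `offset`.
    ///
    /// Returns `true` when the interrupt state may have changed and the
    /// caller must re-evaluate the outbound IRQ lines (see
    /// [`PL011State::update`]). `char_backend` is taken as a raw pointer
    /// because the `LCR_H` break handling calls back into the chardev layer
    /// through `qemu_chr_fe_ioctl`.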
    pub(self) fn write(
        &mut self,
        offset: RegisterOffset,
        value: u32,
        char_backend: *mut CharBackend,
    ) -> bool {
        // eprintln!("write offset {offset} value {value}");
        use RegisterOffset::*;
        match offset {
            DR => {
                // interrupts always checked
                let _ = self.loopback_tx(value);
                self.int_level |= Interrupt::TX.0;
                return true;
            }
            RSR => {
                self.receive_status_error_clear = 0.into();
            }
            FR => {
                // flag writes are ignored
            }
            ILPR => {
                self.ilpr = value;
            }
            IBRD => {
                self.ibrd = value;
            }
            FBRD => {
                self.fbrd = value;
            }
            LCR_H => {
                let new_val: registers::LineControl = value.into();
                // Reset the FIFO state on FIFO enable or disable
                if self.line_control.fifos_enabled() != new_val.fifos_enabled() {
                    self.reset_rx_fifo();
                    self.reset_tx_fifo();
                }
                let update = (self.line_control.send_break() != new_val.send_break()) && {
                    let mut break_enable: c_int = new_val.send_break().into();
                    // SAFETY: self.char_backend is a valid CharBackend instance after it's been
                    // initialized in realize().
                    unsafe {
                        qemu_chr_fe_ioctl(
                            char_backend,
                            CHR_IOCTL_SERIAL_SET_BREAK as i32,
                            addr_of_mut!(break_enable).cast::<c_void>(),
                        );
                    }
                    self.loopback_break(break_enable > 0)
                };
                self.line_control = new_val;
                self.set_read_trigger();
                return update;
            }
            CR => {
                // ??? Need to implement the enable bit.
                self.control = value.into();
                return self.loopback_mdmctrl();
            }
            FLS => {
                self.ifl = value;
                self.set_read_trigger();
            }
            IMSC => {
                self.int_enabled = value;
                return true;
            }
            RIS => {}
            MIS => {}
            ICR => {
                self.int_level &= !value;
                return true;
            }
            DMACR => {
                self.dmacr = value;
                if value & 3 > 0 {
                    // qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n");
                    eprintln!("pl011: DMA not implemented");
                }
            }
        }
        false
    }

    #[inline]
    #[must_use]
    fn loopback_tx(&mut self, value: u32) -> bool {
        // Caveat:
        //
        // In real hardware, TX loopback happens at the serial-bit level
        // and is then reassembled by the RX logic back into bytes and placed
        // into the RX fifo. That is, loopback happens after the TX fifo.
        //
        // Because the real hardware TX fifo is time-drained at the frame
        // rate governed by the configured serial format, some loopback
        // bytes in the TX fifo may still be able to get into the RX fifo
        // that could be full at times while being drained at software
        // pace.
        //
        // In such a scenario, the RX draining pace is the major factor
        // deciding which loopback bytes get into the RX fifo, unless
        // hardware flow-control is enabled.
        //
        // For simplicity, the behavior described above is not emulated.
        self.loopback_enabled() && self.put_fifo(value)
    }

    #[must_use]
    fn loopback_mdmctrl(&mut self) -> bool {
        if !self.loopback_enabled() {
            return false;
        }

        /*
         * Loopback software-driven modem control outputs to modem status inputs:
         *   FR.RI  <= CR.Out2
         *   FR.DCD <= CR.Out1
         *   FR.CTS <= CR.RTS
         *   FR.DSR <= CR.DTR
         *
         * The loopback happens immediately even if this call is triggered
         * by setting only CR.LBE.
         *
         * CTS/RTS updates due to enabled hardware flow controls are not
         * dealt with here.
         */

        self.flags.set_ring_indicator(self.control.out_2());
        self.flags.set_data_carrier_detect(self.control.out_1());
        self.flags.set_clear_to_send(self.control.request_to_send());
        self.flags
            .set_data_set_ready(self.control.data_transmit_ready());

        // Change interrupts based on updated FR
        let mut il = self.int_level;

        il &= !Interrupt::MS.0;

        if self.flags.data_set_ready() {
            il |= Interrupt::DSR.0;
        }
        if self.flags.data_carrier_detect() {
            il |= Interrupt::DCD.0;
        }
        if self.flags.clear_to_send() {
            il |= Interrupt::CTS.0;
        }
        if self.flags.ring_indicator() {
            il |= Interrupt::RI.0;
        }
        self.int_level = il;
        true
    }

    fn loopback_break(&mut self, enable: bool) -> bool {
        enable && self.loopback_tx(registers::Data::BREAK.into())
    }

    fn set_read_trigger(&mut self) {
        self.read_trigger = 1;
    }

    pub fn reset(&mut self) {
        self.line_control.reset();
        self.receive_status_error_clear.reset();
        self.dmacr = 0;
        self.int_enabled = 0;
        self.int_level = 0;
        self.ilpr = 0;
        self.ibrd = 0;
        self.fbrd = 0;
        self.read_trigger = 1;
        self.ifl = 0x12;
        self.control.reset();
        self.flags.reset();
        self.reset_rx_fifo();
        self.reset_tx_fifo();
    }

    pub fn reset_rx_fifo(&mut self) {
        self.read_count = 0;
        self.read_pos = 0;

        // Reset FIFO flags
        self.flags.set_receive_fifo_full(false);
        self.flags.set_receive_fifo_empty(true);
    }

    pub fn reset_tx_fifo(&mut self) {
        // Reset FIFO flags
        self.flags.set_transmit_fifo_full(false);
        self.flags.set_transmit_fifo_empty(true);
    }

    #[inline]
    pub fn fifo_enabled(&self) -> bool {
        self.line_control.fifos_enabled() == registers::Mode::FIFO
    }

    #[inline]
    pub fn loopback_enabled(&self) -> bool {
        self.control.enable_loopback()
    }

    #[inline]
    pub fn fifo_depth(&self) -> u32 {
        // Note: FIFO depth is expected to be power-of-2
        if self.fifo_enabled() {
            return PL011_FIFO_DEPTH;
        }
        1
    }

    /// Store `value` into the receive FIFO and return `true` if the RX
    /// interrupt level was raised, i.e. if the caller must re-evaluate the
    /// outbound IRQ lines.
    ///
    /// The FIFO is a circular buffer indexed modulo [`Self::fifo_depth`];
    /// for example, with a depth of 16, `read_pos == 14` and
    /// `read_count == 3`, the next character is stored at slot
    /// `(14 + 3) & 15 == 1`.
    #[must_use]
    pub fn put_fifo(&mut self, value: u32) -> bool {
        let depth = self.fifo_depth();
        assert!(depth > 0);
        let slot = (self.read_pos + self.read_count) & (depth - 1);
        self.read_fifo[slot] = registers::Data::from(value);
        self.read_count += 1;
        self.flags.set_receive_fifo_empty(false);
        if self.read_count == depth {
            self.flags.set_receive_fifo_full(true);
        }

        if self.read_count == self.read_trigger {
            self.int_level |= Interrupt::RX.0;
            return true;
        }
        false
    }

    pub fn post_load(&mut self) -> Result<(), ()> {
        /* Sanity-check input state */
        if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() {
            return Err(());
        }

        if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 {
            // Older versions of PL011 didn't ensure that the single
            // character in the FIFO in FIFO-disabled mode is in
            // element 0 of the array; convert to follow the current
            // code's assumptions.
            self.read_fifo[0] = self.read_fifo[self.read_pos];
            self.read_pos = 0;
        }

        self.ibrd &= IBRD_MASK;
        self.fbrd &= FBRD_MASK;

        Ok(())
    }
}

impl PL011State {
    /// Initializes a pre-allocated, uninitialized instance of `PL011State`.
    ///
    /// # Safety
    ///
    /// `self` must point to a correctly sized and aligned location for the
    /// `PL011State` type. It must not be called more than once on the same
    /// location/instance. All its fields are expected to hold uninitialized
    /// values with the sole exception of `parent_obj`.
    unsafe fn init(&mut self) {
        static PL011_OPS: MemoryRegionOps<PL011State> = MemoryRegionOpsBuilder::<PL011State>::new()
            .read(&PL011State::read)
            .write(&PL011State::write)
            .native_endian()
            .impl_sizes(4, 4)
            .build();

        // SAFETY:
        //
        // self and self.iomem are guaranteed to be valid at this point since callers
        // must make sure the `self` reference is valid.
        MemoryRegion::init_io(
            unsafe { &mut *addr_of_mut!(self.iomem) },
            addr_of_mut!(*self),
            &PL011_OPS,
            "pl011",
            0x1000,
        );

        self.regs = Default::default();

        // SAFETY:
        //
        // self.clock is not initialized at this point; but since `Owned<_>` is
        // not Drop, we can overwrite the undefined value without side effects.
        // This is not sound, but because all PL011State instances are created
        // by QOM code that calls this function to initialize the fields, at
        // least no code is able to access an invalid self.clock value.
        self.clock = self.init_clock_in("clk", &Self::clock_update, ClockEvent::ClockUpdate);
    }

    const fn clock_update(&self, _event: ClockEvent) {
        /* pl011_trace_baudrate_change(s); */
    }

    fn post_init(&self) {
        self.init_mmio(&self.iomem);
        for irq in self.interrupts.iter() {
            self.init_irq(irq);
        }
    }
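
    /// MMIO read callback registered through `MemoryRegionOps` in `init()`.
    ///
    /// Offsets that do not map to a [`RegisterOffset`] but fall within
    /// `0xfe0..0x1000` return the peripheral/PrimeCell identification bytes
    /// from the class's `device_id`, one byte per 32-bit register.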
    pub fn read(&self, offset: hwaddr, _size: u32) -> u64 {
        match RegisterOffset::try_from(offset) {
            Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => {
                let device_id = self.get_class().device_id;
                u64::from(device_id[(offset - 0xfe0) >> 2])
            }
            Err(_) => {
                // qemu_log_mask(LOG_GUEST_ERROR, "pl011_read: Bad offset 0x%x\n", (int)offset);
                0
            }
            Ok(field) => {
                let (update_irq, result) = self.regs.borrow_mut().read(field);
                if update_irq {
                    self.update();
                    unsafe {
                        qemu_chr_fe_accept_input(addr_of!(self.char_backend) as *mut _);
                    }
                }
                result.into()
            }
        }
    }

    pub fn write(&self, offset: hwaddr, value: u64, _size: u32) {
        let mut update_irq = false;
        if let Ok(field) = RegisterOffset::try_from(offset) {
            // qemu_chr_fe_write_all() calls into the can_receive
            // callback, so handle writes before entering PL011Registers.
            if field == RegisterOffset::DR {
                // ??? Check if transmitter is enabled.
                let ch: u8 = value as u8;
                // SAFETY: char_backend is a valid CharBackend instance after it's been
                // initialized in realize().
                // XXX: this blocks the entire thread. Rewrite to use
                // qemu_chr_fe_write and background I/O callbacks.
                unsafe {
                    qemu_chr_fe_write_all(addr_of!(self.char_backend) as *mut _, &ch, 1);
                }
            }

            update_irq = self.regs.borrow_mut().write(
                field,
                value as u32,
                addr_of!(self.char_backend) as *mut _,
            );
        } else {
            eprintln!("write bad offset {offset} value {value}");
        }
        if update_irq {
            self.update();
        }
    }

    pub fn can_receive(&self) -> bool {
        // trace_pl011_can_receive(s->lcr, s->read_count, r);
        let regs = self.regs.borrow();
        regs.read_count < regs.fifo_depth()
    }

    pub fn receive(&self, ch: u32) {
        let mut regs = self.regs.borrow_mut();
        let update_irq = !regs.loopback_enabled() && regs.put_fifo(ch);
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update();
        }
    }

    pub fn event(&self, event: QEMUChrEvent) {
        let mut update_irq = false;
        let mut regs = self.regs.borrow_mut();
        if event == QEMUChrEvent::CHR_EVENT_BREAK && !regs.loopback_enabled() {
            update_irq = regs.put_fifo(registers::Data::BREAK.into());
        }
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update()
        }
    }

    pub fn realize(&self) {
        // SAFETY: self.char_backend has the correct size and alignment for a
        // CharBackend object, and its callbacks are of the correct types.
        unsafe {
            qemu_chr_fe_set_handlers(
                addr_of!(self.char_backend) as *mut CharBackend,
                Some(pl011_can_receive),
                Some(pl011_receive),
                Some(pl011_event),
                None,
                addr_of!(*self).cast::<c_void>() as *mut c_void,
                core::ptr::null_mut(),
                true,
            );
        }
    }

    pub fn reset_hold(&self, _type: ResetType) {
        self.regs.borrow_mut().reset();
    }

    pub fn update(&self) {
        let regs = self.regs.borrow();
        let flags = regs.int_level & regs.int_enabled;
        for (irq, i) in self.interrupts.iter().zip(IRQMASK) {
            irq.set(flags & i != 0);
        }
    }

    pub fn post_load(&self, _version_id: u32) -> Result<(), ()> {
        self.regs.borrow_mut().post_load()
    }
}

/// Which bits in the interrupt status matter for each outbound IRQ line?
const IRQMASK: [u32; 6] = [
    /* combined IRQ */
    Interrupt::E.0 | Interrupt::MS.0 | Interrupt::RT.0 | Interrupt::TX.0 | Interrupt::RX.0,
    Interrupt::RX.0,
    Interrupt::TX.0,
    Interrupt::RT.0,
    Interrupt::MS.0,
    Interrupt::E.0,
];
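
// Illustrative compile-time check (an addition, not in the original model):
// the combined `UARTINTR` mask is exactly the union of the per-line masks
// routed to sysbus IRQs 1 through 5.
const _: () =
    assert!(IRQMASK[0] == (IRQMASK[1] | IRQMASK[2] | IRQMASK[3] | IRQMASK[4] | IRQMASK[5]));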

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device is
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_can_receive(opaque: *mut c_void) -> c_int {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().can_receive().into() }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device is
/// readable/writeable from one thread at any time.
///
/// The buffer and size arguments must also be valid.
pub unsafe extern "C" fn pl011_receive(opaque: *mut c_void, buf: *const u8, size: c_int) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe {
        if size > 0 {
            debug_assert!(!buf.is_null());
            state.as_ref().receive(u32::from(buf.read_volatile()));
        }
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device is
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_event(opaque: *mut c_void, event: QEMUChrEvent) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().event(event) }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer for `chr`
/// and `irq`.
#[no_mangle]
pub unsafe extern "C" fn pl011_create(
    addr: u64,
    irq: *mut IRQState,
    chr: *mut Chardev,
) -> *mut DeviceState {
    // SAFETY: The callers promise that they have owned references.
    // They do not gift them to pl011_create, so use `Owned::from`.
    let irq = unsafe { Owned::<IRQState>::from(&*irq) };
    let chr = unsafe { Owned::<Chardev>::from(&*chr) };

    let dev = PL011State::new();
    dev.prop_set_chr("chardev", &chr);
    dev.sysbus_realize();
    dev.mmio_map(0, addr);
    dev.connect_irq(0, &irq);

    // The pointer is kept alive by the QOM tree; drop the owned ref
    dev.as_mut_ptr()
}

#[repr(C)]
#[derive(qemu_api_macros::Object)]
/// PL011 Luminary device model.
pub struct PL011Luminary {
    parent_obj: ParentField<PL011State>,
}

impl ClassInitImpl<PL011Class> for PL011Luminary {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::LUMINARY;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object);

unsafe impl ObjectType for PL011Luminary {
    type Class = <PL011State as ObjectType>::Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY;
}

impl ObjectImpl for PL011Luminary {
    type ParentType = PL011State;
}

impl DeviceImpl for PL011Luminary {}
impl ResettablePhasesImpl for PL011Luminary {}
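
// The following unit-test sketch is an addition, not part of the original
// device model. It only exercises the pure register logic in
// `PL011Registers` (no FFI calls are made), assuming the crate's build setup
// allows running its unit tests.
#[cfg(test)]
mod fifo_tests {
    use super::*;

    #[test]
    fn dr_roundtrip_with_fifos_disabled() {
        let mut regs = PL011Registers::default();
        regs.reset();

        // After reset the FIFOs are disabled, so the effective depth is 1 and
        // a single stored character immediately reaches the read trigger.
        assert!(regs.put_fifo(u32::from(b'A')));
        assert_ne!(regs.int_level & Interrupt::RX.0, 0);

        // Reading DR drains the character, clears the RX interrupt level and
        // asks the caller to accept more input from the chardev.
        let (update, value) = regs.read(RegisterOffset::DR);
        assert!(update);
        assert_eq!(value & 0xff, u32::from(b'A'));
        assert_eq!(regs.int_level & Interrupt::RX.0, 0);
    }
}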