// Copyright 2024, Linaro Limited
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
// SPDX-License-Identifier: GPL-2.0-or-later

use std::{
    ffi::CStr,
    os::raw::{c_int, c_void},
    ptr::{addr_of, addr_of_mut, NonNull},
};

use qemu_api::{
    bindings::{
        qemu_chr_fe_accept_input, qemu_chr_fe_ioctl, qemu_chr_fe_set_handlers,
        qemu_chr_fe_write_all, CharBackend, QEMUChrEvent, CHR_IOCTL_SERIAL_SET_BREAK,
    },
    chardev::Chardev,
    impl_vmstate_forward,
    irq::{IRQState, InterruptSource},
    memory::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder},
    prelude::*,
    qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl},
    qom::{ObjectImpl, Owned, ParentField},
    sysbus::{SysBusDevice, SysBusDeviceImpl},
    vmstate::VMStateDescription,
};

use crate::{
    device_class,
    registers::{self, Interrupt},
    RegisterOffset,
};

/// Integer Baud Rate Divider, `UARTIBRD`
const IBRD_MASK: u32 = 0xffff;

/// Fractional Baud Rate Divider, `UARTFBRD`
const FBRD_MASK: u32 = 0x3f;

/// QEMU sourced constant.
pub const PL011_FIFO_DEPTH: u32 = 16;

#[derive(Clone, Copy)]
struct DeviceId(&'static [u8; 8]);

impl std::ops::Index<hwaddr> for DeviceId {
    type Output = u8;

    fn index(&self, idx: hwaddr) -> &Self::Output {
        &self.0[idx as usize]
    }
}

// FIFOs use 32-bit indices instead of usize, for compatibility with
// the migration stream produced by the C version of this device.
#[repr(transparent)]
#[derive(Debug, Default)]
pub struct Fifo([registers::Data; PL011_FIFO_DEPTH as usize]);
impl_vmstate_forward!(Fifo);

impl Fifo {
    const fn len(&self) -> u32 {
        self.0.len() as u32
    }
}

impl std::ops::IndexMut<u32> for Fifo {
    fn index_mut(&mut self, idx: u32) -> &mut Self::Output {
        &mut self.0[idx as usize]
    }
}

impl std::ops::Index<u32> for Fifo {
    type Output = registers::Data;

    fn index(&self, idx: u32) -> &Self::Output {
        &self.0[idx as usize]
    }
}

#[repr(C)]
#[derive(Debug, Default, qemu_api_macros::offsets)]
pub struct PL011Registers {
    #[doc(alias = "fr")]
    pub flags: registers::Flags,
    #[doc(alias = "lcr")]
    pub line_control: registers::LineControl,
    #[doc(alias = "rsr")]
    pub receive_status_error_clear: registers::ReceiveStatusErrorClear,
    #[doc(alias = "cr")]
    pub control: registers::Control,
    pub dmacr: u32,
    pub int_enabled: u32,
    pub int_level: u32,
    pub read_fifo: Fifo,
    pub ilpr: u32,
    pub ibrd: u32,
    pub fbrd: u32,
    pub ifl: u32,
    pub read_pos: u32,
    pub read_count: u32,
    pub read_trigger: u32,
}

#[repr(C)]
#[derive(qemu_api_macros::Object, qemu_api_macros::offsets)]
/// PL011 Device Model in QEMU
pub struct PL011State {
    pub parent_obj: ParentField<SysBusDevice>,
    pub iomem: MemoryRegion,
    #[doc(alias = "chr")]
    pub char_backend: CharBackend,
    pub regs: BqlRefCell<PL011Registers>,
    /// QEMU interrupts
    ///
    /// ```text
    /// * sysbus MMIO region 0: device registers
    /// * sysbus IRQ 0: `UARTINTR` (combined interrupt line)
    /// * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line)
    /// * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line)
    /// * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line)
    /// * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line)
    /// * sysbus IRQ 5: `UARTEINTR` (error interrupt line)
    /// ```
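    ///
    /// The ordering of these lines matches the `IRQMASK` table at the end of
    /// this file; `PL011State::update` raises or lowers each line according
    /// to the corresponding mask entry.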
    #[doc(alias = "irq")]
    pub interrupts: [InterruptSource; IRQMASK.len()],
    #[doc(alias = "clk")]
    pub clock: Owned<Clock>,
    #[doc(alias = "migrate_clk")]
    pub migrate_clock: bool,
}

qom_isa!(PL011State : SysBusDevice, DeviceState, Object);

#[repr(C)]
pub struct PL011Class {
    parent_class: <SysBusDevice as ObjectType>::Class,
    /// The byte string that identifies the device.
    device_id: DeviceId,
}

trait PL011Impl: SysBusDeviceImpl + IsA<PL011State> {
    const DEVICE_ID: DeviceId;
}

impl PL011Class {
    fn class_init<T: PL011Impl>(&mut self) {
        self.device_id = T::DEVICE_ID;
        self.parent_class.class_init::<T>();
    }
}

unsafe impl ObjectType for PL011State {
    type Class = PL011Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
}

impl PL011Impl for PL011State {
    const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
}

impl ObjectImpl for PL011State {
    type ParentType = SysBusDevice;

    const INSTANCE_INIT: Option<unsafe fn(&mut Self)> = Some(Self::init);
    const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
    const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
}

impl DeviceImpl for PL011State {
    fn properties() -> &'static [Property] {
        &device_class::PL011_PROPERTIES
    }
    fn vmsd() -> Option<&'static VMStateDescription> {
        Some(&device_class::VMSTATE_PL011)
    }
    const REALIZE: Option<fn(&Self)> = Some(Self::realize);
}

impl ResettablePhasesImpl for PL011State {
    const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
}

impl SysBusDeviceImpl for PL011State {}

impl PL011Registers {
    pub(self) fn read(&mut self, offset: RegisterOffset) -> (bool, u32) {
        use RegisterOffset::*;

        let mut update = false;
        let result = match offset {
            DR => {
                self.flags.set_receive_fifo_full(false);
                let c = self.read_fifo[self.read_pos];
                if self.read_count > 0 {
                    self.read_count -= 1;
                    self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1);
                }
                if self.read_count == 0 {
                    self.flags.set_receive_fifo_empty(true);
                }
                if self.read_count + 1 == self.read_trigger {
                    self.int_level &= !Interrupt::RX.0;
                }
                // Update error bits.
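                // (RSR/ECR reflects the break/frame/parity/overrun flags of
                // the character just popped from the receive FIFO.)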
                self.receive_status_error_clear.set_from_data(c);
                // Must call qemu_chr_fe_accept_input
                update = true;
                u32::from(c)
            }
            RSR => u32::from(self.receive_status_error_clear),
            FR => u32::from(self.flags),
            FBRD => self.fbrd,
            ILPR => self.ilpr,
            IBRD => self.ibrd,
            LCR_H => u32::from(self.line_control),
            CR => u32::from(self.control),
            FLS => self.ifl,
            IMSC => self.int_enabled,
            RIS => self.int_level,
            MIS => self.int_level & self.int_enabled,
            ICR => {
                // "The UARTICR Register is the interrupt clear register and is write-only"
                // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR
                0
            }
            DMACR => self.dmacr,
        };
        (update, result)
    }

    pub(self) fn write(
        &mut self,
        offset: RegisterOffset,
        value: u32,
        char_backend: *mut CharBackend,
    ) -> bool {
        // eprintln!("write offset {offset} value {value}");
        use RegisterOffset::*;
        match offset {
            DR => {
                // interrupts always checked
                let _ = self.loopback_tx(value);
                self.int_level |= Interrupt::TX.0;
                return true;
            }
            RSR => {
                self.receive_status_error_clear = 0.into();
            }
            FR => {
                // flag writes are ignored
            }
            ILPR => {
                self.ilpr = value;
            }
            IBRD => {
                self.ibrd = value;
            }
            FBRD => {
                self.fbrd = value;
            }
            LCR_H => {
                let new_val: registers::LineControl = value.into();
                // Reset the FIFO state on FIFO enable or disable
                if self.line_control.fifos_enabled() != new_val.fifos_enabled() {
                    self.reset_rx_fifo();
                    self.reset_tx_fifo();
                }
                let update = (self.line_control.send_break() != new_val.send_break()) && {
                    let mut break_enable: c_int = new_val.send_break().into();
                    // SAFETY: self.char_backend is a valid CharBackend instance after it's been
                    // initialized in realize().
                    unsafe {
                        qemu_chr_fe_ioctl(
                            char_backend,
                            CHR_IOCTL_SERIAL_SET_BREAK as i32,
                            addr_of_mut!(break_enable).cast::<c_void>(),
                        );
                    }
                    self.loopback_break(break_enable > 0)
                };
                self.line_control = new_val;
                self.set_read_trigger();
                return update;
            }
            CR => {
                // ??? Need to implement the enable bit.
                self.control = value.into();
                return self.loopback_mdmctrl();
            }
            FLS => {
                self.ifl = value;
                self.set_read_trigger();
            }
            IMSC => {
                self.int_enabled = value;
                return true;
            }
            RIS => {}
            MIS => {}
            ICR => {
                self.int_level &= !value;
                return true;
            }
            DMACR => {
                self.dmacr = value;
                if value & 3 > 0 {
                    // qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n");
                    eprintln!("pl011: DMA not implemented");
                }
            }
        }
        false
    }

    #[inline]
    #[must_use]
    fn loopback_tx(&mut self, value: u32) -> bool {
        // Caveat:
        //
        // In real hardware, TX loopback happens at the serial-bit level and is
        // then reassembled by the RX logic back into bytes and placed into the
        // RX fifo. That is, loopback happens after the TX fifo.
        //
        // Because the real hardware TX fifo is time-drained at the frame
        // rate governed by the configured serial format, some loopback
        // bytes in the TX fifo may still be able to get into the RX fifo
        // that could be full at times while being drained at software
        // pace.
        //
        // In such a scenario, the RX draining pace is the major factor
        // deciding which loopback bytes get into the RX fifo, unless
        // hardware flow-control is enabled.
        //
        // For simplicity, the behaviour described above is not emulated.
        self.loopback_enabled() && self.put_fifo(value)
    }

    #[must_use]
    fn loopback_mdmctrl(&mut self) -> bool {
        if !self.loopback_enabled() {
            return false;
        }

        /*
         * Loopback software-driven modem control outputs to modem status inputs:
         *   FR.RI  <= CR.Out2
         *   FR.DCD <= CR.Out1
         *   FR.CTS <= CR.RTS
         *   FR.DSR <= CR.DTR
         *
         * The loopback happens immediately even if this call is triggered
         * by setting only CR.LBE.
         *
         * CTS/RTS updates due to enabled hardware flow controls are not
         * dealt with here.
         */

        self.flags.set_ring_indicator(self.control.out_2());
        self.flags.set_data_carrier_detect(self.control.out_1());
        self.flags.set_clear_to_send(self.control.request_to_send());
        self.flags
            .set_data_set_ready(self.control.data_transmit_ready());

        // Change interrupts based on updated FR
        let mut il = self.int_level;

        il &= !Interrupt::MS.0;

        if self.flags.data_set_ready() {
            il |= Interrupt::DSR.0;
        }
        if self.flags.data_carrier_detect() {
            il |= Interrupt::DCD.0;
        }
        if self.flags.clear_to_send() {
            il |= Interrupt::CTS.0;
        }
        if self.flags.ring_indicator() {
            il |= Interrupt::RI.0;
        }
        self.int_level = il;
        true
    }

    fn loopback_break(&mut self, enable: bool) -> bool {
        enable && self.loopback_tx(registers::Data::BREAK.into())
    }

    fn set_read_trigger(&mut self) {
        self.read_trigger = 1;
    }

    pub fn reset(&mut self) {
        self.line_control.reset();
        self.receive_status_error_clear.reset();
        self.dmacr = 0;
        self.int_enabled = 0;
        self.int_level = 0;
        self.ilpr = 0;
        self.ibrd = 0;
        self.fbrd = 0;
        self.read_trigger = 1;
        self.ifl = 0x12;
        self.control.reset();
        self.flags.reset();
        self.reset_rx_fifo();
        self.reset_tx_fifo();
    }

    pub fn reset_rx_fifo(&mut self) {
        self.read_count = 0;
        self.read_pos = 0;

        // Reset FIFO flags
        self.flags.set_receive_fifo_full(false);
        self.flags.set_receive_fifo_empty(true);
    }

    pub fn reset_tx_fifo(&mut self) {
        // Reset FIFO flags
        self.flags.set_transmit_fifo_full(false);
        self.flags.set_transmit_fifo_empty(true);
    }

    #[inline]
    pub fn fifo_enabled(&self) -> bool {
        self.line_control.fifos_enabled() == registers::Mode::FIFO
    }

    #[inline]
    pub fn loopback_enabled(&self) -> bool {
        self.control.enable_loopback()
    }

    #[inline]
    pub fn fifo_depth(&self) -> u32 {
        // Note: FIFO depth is expected to be power-of-2
        if self.fifo_enabled() {
            return PL011_FIFO_DEPTH;
        }
        1
    }

    #[must_use]
    pub fn put_fifo(&mut self, value: u32) -> bool {
        let depth = self.fifo_depth();
        assert!(depth > 0);
        let slot = (self.read_pos + self.read_count) & (depth - 1);
        self.read_fifo[slot] = registers::Data::from(value);
        self.read_count += 1;
        self.flags.set_receive_fifo_empty(false);
        if self.read_count == depth {
            self.flags.set_receive_fifo_full(true);
        }

        if self.read_count == self.read_trigger {
            self.int_level |= Interrupt::RX.0;
            return true;
        }
        false
    }

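    /// Sanity-check and fix up register state loaded from the migration
    /// stream (`device_class::VMSTATE_PL011`); returning `Err` makes the
    /// incoming load fail.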
    pub fn post_load(&mut self) -> Result<(), ()> {
        /* Sanity-check input state */
        if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() {
            return Err(());
        }

        if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 {
            // Older versions of PL011 didn't ensure that the single
            // character in the FIFO in FIFO-disabled mode is in
            // element 0 of the array; convert to follow the current
            // code's assumptions.
            self.read_fifo[0] = self.read_fifo[self.read_pos];
            self.read_pos = 0;
        }

        self.ibrd &= IBRD_MASK;
        self.fbrd &= FBRD_MASK;

        Ok(())
    }
}

impl PL011State {
    /// Initializes a pre-allocated, uninitialized instance of `PL011State`.
    ///
    /// # Safety
    ///
    /// `self` must point to a correctly sized and aligned location for the
    /// `PL011State` type. It must not be called more than once on the same
    /// location/instance. All its fields are expected to hold uninitialized
    /// values with the sole exception of `parent_obj`.
    unsafe fn init(&mut self) {
        static PL011_OPS: MemoryRegionOps<PL011State> = MemoryRegionOpsBuilder::<PL011State>::new()
            .read(&PL011State::read)
            .write(&PL011State::write)
            .native_endian()
            .impl_sizes(4, 4)
            .build();

        // SAFETY:
        //
        // self and self.iomem are guaranteed to be valid at this point since callers
        // must make sure the `self` reference is valid.
        MemoryRegion::init_io(
            unsafe { &mut *addr_of_mut!(self.iomem) },
            addr_of_mut!(*self),
            &PL011_OPS,
            "pl011",
            0x1000,
        );

        self.regs = Default::default();

        // SAFETY:
        //
        // self.clock is not initialized at this point; but since `Owned<_>` is
        // not Drop, we can overwrite the undefined value without side effects.
        // It's not sound, but because all PL011State instances are created by
        // QOM code which calls this function to initialize the fields, at
        // least no code is able to access an invalid self.clock value.
        self.clock = self.init_clock_in("clk", &Self::clock_update, ClockEvent::ClockUpdate);
    }

    const fn clock_update(&self, _event: ClockEvent) {
        /* pl011_trace_baudrate_change(s); */
    }

    fn post_init(&self) {
        self.init_mmio(&self.iomem);
        for irq in self.interrupts.iter() {
            self.init_irq(irq);
        }
    }

    pub fn read(&self, offset: hwaddr, _size: u32) -> u64 {
        match RegisterOffset::try_from(offset) {
            Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => {
                let device_id = self.get_class().device_id;
                u64::from(device_id[(offset - 0xfe0) >> 2])
            }
            Err(_) => {
                // qemu_log_mask(LOG_GUEST_ERROR, "pl011_read: Bad offset 0x%x\n", (int)offset);
                0
            }
            Ok(field) => {
                let (update_irq, result) = self.regs.borrow_mut().read(field);
                if update_irq {
                    self.update();
                    unsafe {
                        qemu_chr_fe_accept_input(addr_of!(self.char_backend) as *mut _);
                    }
                }
                result.into()
            }
        }
    }

    pub fn write(&self, offset: hwaddr, value: u64, _size: u32) {
        let mut update_irq = false;
        if let Ok(field) = RegisterOffset::try_from(offset) {
            // qemu_chr_fe_write_all() calls into the can_receive
            // callback, so handle writes before entering PL011Registers.
            if field == RegisterOffset::DR {
                // ??? Check if transmitter is enabled.
                let ch: u8 = value as u8;
                // SAFETY: char_backend is a valid CharBackend instance after it's been
                // initialized in realize().
                // XXX this blocks the entire thread. Rewrite to use
                // qemu_chr_fe_write and background I/O callbacks
                unsafe {
                    qemu_chr_fe_write_all(addr_of!(self.char_backend) as *mut _, &ch, 1);
                }
            }

            update_irq = self.regs.borrow_mut().write(
                field,
                value as u32,
                addr_of!(self.char_backend) as *mut _,
            );
        } else {
            eprintln!("write bad offset {offset} value {value}");
        }
        if update_irq {
            self.update();
        }
    }

    pub fn can_receive(&self) -> bool {
        // trace_pl011_can_receive(s->lcr, s->read_count, r);
        let regs = self.regs.borrow();
        regs.read_count < regs.fifo_depth()
    }

    pub fn receive(&self, ch: u32) {
        let mut regs = self.regs.borrow_mut();
        let update_irq = !regs.loopback_enabled() && regs.put_fifo(ch);
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update();
        }
    }

    pub fn event(&self, event: QEMUChrEvent) {
        let mut update_irq = false;
        let mut regs = self.regs.borrow_mut();
        if event == QEMUChrEvent::CHR_EVENT_BREAK && !regs.loopback_enabled() {
            update_irq = regs.put_fifo(registers::Data::BREAK.into());
        }
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update()
        }
    }

    pub fn realize(&self) {
        // SAFETY: self.char_backend has the correct size and alignment for a
        // CharBackend object, and its callbacks are of the correct types.
        unsafe {
            qemu_chr_fe_set_handlers(
                addr_of!(self.char_backend) as *mut CharBackend,
                Some(pl011_can_receive),
                Some(pl011_receive),
                Some(pl011_event),
                None,
                addr_of!(*self).cast::<c_void>() as *mut c_void,
                core::ptr::null_mut(),
                true,
            );
        }
    }

    pub fn reset_hold(&self, _type: ResetType) {
        self.regs.borrow_mut().reset();
    }

    pub fn update(&self) {
        let regs = self.regs.borrow();
        let flags = regs.int_level & regs.int_enabled;
        for (irq, i) in self.interrupts.iter().zip(IRQMASK) {
            irq.set(flags & i != 0);
        }
    }

    pub fn post_load(&self, _version_id: u32) -> Result<(), ()> {
        self.regs.borrow_mut().post_load()
    }
}

/// Which bits in the interrupt status matter for each outbound IRQ line?
const IRQMASK: [u32; 6] = [
    /* combined IRQ */
    Interrupt::E.0 | Interrupt::MS.0 | Interrupt::RT.0 | Interrupt::TX.0 | Interrupt::RX.0,
    Interrupt::RX.0,
    Interrupt::TX.0,
    Interrupt::RT.0,
    Interrupt::MS.0,
    Interrupt::E.0,
];

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_can_receive(opaque: *mut c_void) -> c_int {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().can_receive().into() }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
///
/// The buffer and size arguments must also be valid.
pub unsafe extern "C" fn pl011_receive(opaque: *mut c_void, buf: *const u8, size: c_int) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe {
        if size > 0 {
            debug_assert!(!buf.is_null());
            state.as_ref().receive(u32::from(buf.read_volatile()));
        }
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_event(opaque: *mut c_void, event: QEMUChrEvent) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().event(event) }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer for `chr`
/// and `irq`.
#[no_mangle]
pub unsafe extern "C" fn pl011_create(
    addr: u64,
    irq: *mut IRQState,
    chr: *mut Chardev,
) -> *mut DeviceState {
    // SAFETY: The callers promise that they have owned references.
    // They do not gift them to pl011_create, so use `Owned::from`.
    let irq = unsafe { Owned::<IRQState>::from(&*irq) };
    let chr = unsafe { Owned::<Chardev>::from(&*chr) };

    let dev = PL011State::new();
    dev.prop_set_chr("chardev", &chr);
    dev.sysbus_realize();
    dev.mmio_map(0, addr);
    dev.connect_irq(0, &irq);

    // The pointer is kept alive by the QOM tree; drop the owned ref.
    dev.as_mut_ptr()
}

#[repr(C)]
#[derive(qemu_api_macros::Object)]
/// PL011 Luminary device model.
pub struct PL011Luminary {
    parent_obj: ParentField<PL011State>,
}

qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object);

unsafe impl ObjectType for PL011Luminary {
    type Class = <PL011State as ObjectType>::Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY;
}

impl ObjectImpl for PL011Luminary {
    type ParentType = PL011State;

    const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
}

impl PL011Impl for PL011Luminary {
    const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
}

impl DeviceImpl for PL011Luminary {}
impl ResettablePhasesImpl for PL011Luminary {}
impl SysBusDeviceImpl for PL011Luminary {}