// Copyright 2024, Linaro Limited
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
// SPDX-License-Identifier: GPL-2.0-or-later

use std::{
    ffi::CStr,
    os::raw::{c_int, c_void},
    ptr::{addr_of, addr_of_mut, NonNull},
};

use qemu_api::{
    bindings::{
        qemu_chr_fe_accept_input, qemu_chr_fe_ioctl, qemu_chr_fe_set_handlers,
        qemu_chr_fe_write_all, CharBackend, QEMUChrEvent, CHR_IOCTL_SERIAL_SET_BREAK,
    },
    chardev::Chardev,
    impl_vmstate_forward,
    irq::{IRQState, InterruptSource},
    memory::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder},
    prelude::*,
    qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl},
    qom::{ClassInitImpl, ObjectImpl, Owned, ParentField},
    sysbus::{SysBusDevice, SysBusDeviceClass, SysBusDeviceImpl},
    vmstate::VMStateDescription,
};

use crate::{
    device_class,
    registers::{self, Interrupt},
    RegisterOffset,
};

/// Integer Baud Rate Divider, `UARTIBRD`
const IBRD_MASK: u32 = 0xffff;

/// Fractional Baud Rate Divider, `UARTFBRD`
const FBRD_MASK: u32 = 0x3f;

/// QEMU sourced constant.
pub const PL011_FIFO_DEPTH: u32 = 16;

#[derive(Clone, Copy)]
struct DeviceId(&'static [u8; 8]);

impl std::ops::Index<hwaddr> for DeviceId {
    type Output = u8;

    fn index(&self, idx: hwaddr) -> &Self::Output {
        &self.0[idx as usize]
    }
}
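
// Values of the `UARTPeriphID0-3` and `UARTPCellID0-3` ID registers: reads in
// the 0xfe0..0x1000 range return one of these bytes per 32-bit register (see
// `PL011State::read`).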
impl DeviceId {
    const ARM: Self = Self(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
    const LUMINARY: Self = Self(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
}

// FIFOs use 32-bit indices instead of usize, for compatibility with
// the migration stream produced by the C version of this device.
#[repr(transparent)]
#[derive(Debug, Default)]
pub struct Fifo([registers::Data; PL011_FIFO_DEPTH as usize]);
impl_vmstate_forward!(Fifo);

impl Fifo {
    const fn len(&self) -> u32 {
        self.0.len() as u32
    }
}

impl std::ops::IndexMut<u32> for Fifo {
    fn index_mut(&mut self, idx: u32) -> &mut Self::Output {
        &mut self.0[idx as usize]
    }
}

impl std::ops::Index<u32> for Fifo {
    type Output = registers::Data;

    fn index(&self, idx: u32) -> &Self::Output {
        &self.0[idx as usize]
    }
}

#[repr(C)]
#[derive(Debug, Default, qemu_api_macros::offsets)]
pub struct PL011Registers {
    #[doc(alias = "fr")]
    pub flags: registers::Flags,
    #[doc(alias = "lcr")]
    pub line_control: registers::LineControl,
    #[doc(alias = "rsr")]
    pub receive_status_error_clear: registers::ReceiveStatusErrorClear,
    #[doc(alias = "cr")]
    pub control: registers::Control,
    pub dmacr: u32,
    pub int_enabled: u32,
    pub int_level: u32,
    pub read_fifo: Fifo,
    pub ilpr: u32,
    pub ibrd: u32,
    pub fbrd: u32,
    pub ifl: u32,
    pub read_pos: u32,
    pub read_count: u32,
    pub read_trigger: u32,
}

#[repr(C)]
#[derive(qemu_api_macros::Object, qemu_api_macros::offsets)]
/// PL011 Device Model in QEMU
pub struct PL011State {
    pub parent_obj: ParentField<SysBusDevice>,
    pub iomem: MemoryRegion,
    #[doc(alias = "chr")]
    pub char_backend: CharBackend,
    pub regs: BqlRefCell<PL011Registers>,
    /// QEMU interrupts
    ///
    /// ```text
    /// * sysbus MMIO region 0: device registers
    /// * sysbus IRQ 0: `UARTINTR` (combined interrupt line)
    /// * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line)
    /// * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line)
    /// * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line)
    /// * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line)
    /// * sysbus IRQ 5: `UARTEINTR` (error interrupt line)
    /// ```
    #[doc(alias = "irq")]
    pub interrupts: [InterruptSource; IRQMASK.len()],
    #[doc(alias = "clk")]
    pub clock: Owned<Clock>,
    #[doc(alias = "migrate_clk")]
    pub migrate_clock: bool,
}

qom_isa!(PL011State : SysBusDevice, DeviceState, Object);

#[repr(C)]
pub struct PL011Class {
    parent_class: <SysBusDevice as ObjectType>::Class,
    /// The byte string that identifies the device.
    device_id: DeviceId,
}

unsafe impl ObjectType for PL011State {
    type Class = PL011Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
}

impl ClassInitImpl<PL011Class> for PL011State {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::ARM;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

impl ObjectImpl for PL011State {
    type ParentType = SysBusDevice;

    const INSTANCE_INIT: Option<unsafe fn(&mut Self)> = Some(Self::init);
    const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
}

impl DeviceImpl for PL011State {
    fn properties() -> &'static [Property] {
        &device_class::PL011_PROPERTIES
    }
    fn vmsd() -> Option<&'static VMStateDescription> {
        Some(&device_class::VMSTATE_PL011)
    }
    const REALIZE: Option<fn(&Self)> = Some(Self::realize);
}

impl ResettablePhasesImpl for PL011State {
    const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
}

impl SysBusDeviceImpl for PL011State {}

impl PL011Registers {
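    /// Read a device register, returning `(update, value)`. When `update` is
    /// true, the caller (`PL011State::read`) must recompute the interrupt
    /// lines and re-accept input from the character backend.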
    pub(self) fn read(&mut self, offset: RegisterOffset) -> (bool, u32) {
        use RegisterOffset::*;

        let mut update = false;
        let result = match offset {
            DR => {
                self.flags.set_receive_fifo_full(false);
                let c = self.read_fifo[self.read_pos];
                if self.read_count > 0 {
                    self.read_count -= 1;
                    self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1);
                }
                if self.read_count == 0 {
                    self.flags.set_receive_fifo_empty(true);
                }
                if self.read_count + 1 == self.read_trigger {
                    self.int_level &= !Interrupt::RX.0;
                }
                // Update error bits.
                self.receive_status_error_clear.set_from_data(c);
                // Must call qemu_chr_fe_accept_input
                update = true;
                u32::from(c)
            }
            RSR => u32::from(self.receive_status_error_clear),
            FR => u32::from(self.flags),
            FBRD => self.fbrd,
            ILPR => self.ilpr,
            IBRD => self.ibrd,
            LCR_H => u32::from(self.line_control),
            CR => u32::from(self.control),
            FLS => self.ifl,
            IMSC => self.int_enabled,
            RIS => self.int_level,
            MIS => self.int_level & self.int_enabled,
            ICR => {
                // "The UARTICR Register is the interrupt clear register and is write-only"
                // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR
                0
            }
            DMACR => self.dmacr,
        };
        (update, result)
    }

    pub(self) fn write(
        &mut self,
        offset: RegisterOffset,
        value: u32,
        char_backend: *mut CharBackend,
    ) -> bool {
        // eprintln!("write offset {offset} value {value}");
        use RegisterOffset::*;
        match offset {
            DR => {
                // interrupts always checked
                let _ = self.loopback_tx(value);
                self.int_level |= Interrupt::TX.0;
                return true;
            }
            RSR => {
                self.receive_status_error_clear = 0.into();
            }
            FR => {
                // flag writes are ignored
            }
            ILPR => {
                self.ilpr = value;
            }
            IBRD => {
                self.ibrd = value;
            }
            FBRD => {
                self.fbrd = value;
            }
            LCR_H => {
                let new_val: registers::LineControl = value.into();
                // Reset the FIFO state on FIFO enable or disable
                if self.line_control.fifos_enabled() != new_val.fifos_enabled() {
                    self.reset_rx_fifo();
                    self.reset_tx_fifo();
                }
                let update = (self.line_control.send_break() != new_val.send_break()) && {
                    let mut break_enable: c_int = new_val.send_break().into();
                    // SAFETY: self.char_backend is a valid CharBackend instance after it's been
                    // initialized in realize().
                    unsafe {
                        qemu_chr_fe_ioctl(
                            char_backend,
                            CHR_IOCTL_SERIAL_SET_BREAK as i32,
                            addr_of_mut!(break_enable).cast::<c_void>(),
                        );
                    }
                    self.loopback_break(break_enable > 0)
                };
                self.line_control = new_val;
                self.set_read_trigger();
                return update;
            }
            CR => {
                // ??? Need to implement the enable bit.
                self.control = value.into();
                return self.loopback_mdmctrl();
            }
            FLS => {
                self.ifl = value;
                self.set_read_trigger();
            }
            IMSC => {
                self.int_enabled = value;
                return true;
            }
            RIS => {}
            MIS => {}
            ICR => {
                self.int_level &= !value;
                return true;
            }
            DMACR => {
                self.dmacr = value;
                if value & 3 > 0 {
                    // qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n");
                    eprintln!("pl011: DMA not implemented");
                }
            }
        }
        false
    }

    #[inline]
    #[must_use]
    fn loopback_tx(&mut self, value: u32) -> bool {
        // Caveat:
        //
        // In real hardware, TX loopback happens at the serial-bit level
        // and is then reassembled by the RX logic back into bytes and
        // placed into the RX fifo. That is, loopback happens after the
        // TX fifo.
        //
        // Because the real hardware TX fifo is time-drained at the frame
        // rate governed by the configured serial format, some loopback
        // bytes in the TX fifo may still be able to get into an RX fifo
        // that could be full at times while being drained at software
        // pace.
        //
        // In such a scenario, the RX draining pace is the major factor
        // deciding which loopback bytes get into the RX fifo, unless
        // hardware flow-control is enabled.
        //
        // For simplicity, the behavior described above is not emulated.
        self.loopback_enabled() && self.put_fifo(value)
    }

    #[must_use]
    fn loopback_mdmctrl(&mut self) -> bool {
        if !self.loopback_enabled() {
            return false;
        }

        /*
         * Loopback software-driven modem control outputs to modem status inputs:
         *   FR.RI  <= CR.Out2
         *   FR.DCD <= CR.Out1
         *   FR.CTS <= CR.RTS
         *   FR.DSR <= CR.DTR
         *
         * The loopback happens immediately even if this call is triggered
         * by setting only CR.LBE.
         *
         * CTS/RTS updates due to enabled hardware flow controls are not
         * dealt with here.
         */

        self.flags.set_ring_indicator(self.control.out_2());
        self.flags.set_data_carrier_detect(self.control.out_1());
        self.flags.set_clear_to_send(self.control.request_to_send());
        self.flags
            .set_data_set_ready(self.control.data_transmit_ready());

        // Change interrupts based on updated FR
        let mut il = self.int_level;

        il &= !Interrupt::MS.0;

        if self.flags.data_set_ready() {
            il |= Interrupt::DSR.0;
        }
        if self.flags.data_carrier_detect() {
            il |= Interrupt::DCD.0;
        }
        if self.flags.clear_to_send() {
            il |= Interrupt::CTS.0;
        }
        if self.flags.ring_indicator() {
            il |= Interrupt::RI.0;
        }
        self.int_level = il;
        true
    }

    fn loopback_break(&mut self, enable: bool) -> bool {
        enable && self.loopback_tx(registers::Data::BREAK.into())
    }
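
    // As in the C PL011 model, the receive interrupt fires once a single
    // character is available, rather than at the FIFO level programmed via
    // IFLS.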
    fn set_read_trigger(&mut self) {
        self.read_trigger = 1;
    }

    pub fn reset(&mut self) {
        self.line_control.reset();
        self.receive_status_error_clear.reset();
        self.dmacr = 0;
        self.int_enabled = 0;
        self.int_level = 0;
        self.ilpr = 0;
        self.ibrd = 0;
        self.fbrd = 0;
        self.read_trigger = 1;
        self.ifl = 0x12;
        self.control.reset();
        self.flags.reset();
        self.reset_rx_fifo();
        self.reset_tx_fifo();
    }

    pub fn reset_rx_fifo(&mut self) {
        self.read_count = 0;
        self.read_pos = 0;

        // Reset FIFO flags
        self.flags.set_receive_fifo_full(false);
        self.flags.set_receive_fifo_empty(true);
    }

    pub fn reset_tx_fifo(&mut self) {
        // Reset FIFO flags
        self.flags.set_transmit_fifo_full(false);
        self.flags.set_transmit_fifo_empty(true);
    }

    #[inline]
    pub fn fifo_enabled(&self) -> bool {
        self.line_control.fifos_enabled() == registers::Mode::FIFO
    }

    #[inline]
    pub fn loopback_enabled(&self) -> bool {
        self.control.enable_loopback()
    }

    #[inline]
    pub fn fifo_depth(&self) -> u32 {
        // Note: FIFO depth is expected to be power-of-2
        if self.fifo_enabled() {
            return PL011_FIFO_DEPTH;
        }
        1
    }
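
    // The receive FIFO is a circular buffer: because `fifo_depth()` is always
    // a power of two (1 or PL011_FIFO_DEPTH), masking with `depth - 1` wraps
    // the write slot here and the read position in `read()` without a modulo.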
    #[must_use]
    pub fn put_fifo(&mut self, value: u32) -> bool {
        let depth = self.fifo_depth();
        assert!(depth > 0);
        let slot = (self.read_pos + self.read_count) & (depth - 1);
        self.read_fifo[slot] = registers::Data::from(value);
        self.read_count += 1;
        self.flags.set_receive_fifo_empty(false);
        if self.read_count == depth {
            self.flags.set_receive_fifo_full(true);
        }

        if self.read_count == self.read_trigger {
            self.int_level |= Interrupt::RX.0;
            return true;
        }
        false
    }

    pub fn post_load(&mut self) -> Result<(), ()> {
        /* Sanity-check input state */
        if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() {
            return Err(());
        }

        if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 {
            // Older versions of PL011 didn't ensure that the single
            // character in the FIFO in FIFO-disabled mode is in
            // element 0 of the array; convert to follow the current
            // code's assumptions.
            self.read_fifo[0] = self.read_fifo[self.read_pos];
            self.read_pos = 0;
        }

        self.ibrd &= IBRD_MASK;
        self.fbrd &= FBRD_MASK;

        Ok(())
    }
}

impl PL011State {
    /// Initializes a pre-allocated, uninitialized instance of `PL011State`.
    ///
    /// # Safety
    ///
    /// `self` must point to a correctly sized and aligned location for the
    /// `PL011State` type. It must not be called more than once on the same
    /// location/instance. All its fields are expected to hold uninitialized
    /// values with the sole exception of `parent_obj`.
    unsafe fn init(&mut self) {
        static PL011_OPS: MemoryRegionOps<PL011State> = MemoryRegionOpsBuilder::<PL011State>::new()
            .read(&PL011State::read)
            .write(&PL011State::write)
            .native_endian()
            .impl_sizes(4, 4)
            .build();

        // SAFETY:
        //
        // self and self.iomem are guaranteed to be valid at this point since callers
        // must make sure the `self` reference is valid.
        MemoryRegion::init_io(
            unsafe { &mut *addr_of_mut!(self.iomem) },
            addr_of_mut!(*self),
            &PL011_OPS,
            "pl011",
            0x1000,
        );

        self.regs = Default::default();

        // SAFETY:
        //
        // self.clock is not initialized at this point; but since `Owned<_>` is
        // not Drop, we can overwrite the undefined value without side effects;
        // it's not sound but, because all PL011State instances are created by
        // QOM code which calls this function to initialize the fields, at
        // least no code is able to access an invalid self.clock value.
        self.clock = self.init_clock_in("clk", &Self::clock_update, ClockEvent::ClockUpdate);
    }

    const fn clock_update(&self, _event: ClockEvent) {
        /* pl011_trace_baudrate_change(s); */
    }

    fn post_init(&self) {
        self.init_mmio(&self.iomem);
        for irq in self.interrupts.iter() {
            self.init_irq(irq);
        }
    }

    pub fn read(&self, offset: hwaddr, _size: u32) -> u64 {
        match RegisterOffset::try_from(offset) {
            Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => {
                let device_id = self.get_class().device_id;
                u64::from(device_id[(offset - 0xfe0) >> 2])
            }
            Err(_) => {
                // qemu_log_mask(LOG_GUEST_ERROR, "pl011_read: Bad offset 0x%x\n", (int)offset);
                0
            }
            Ok(field) => {
                let (update_irq, result) = self.regs.borrow_mut().read(field);
                if update_irq {
                    self.update();
                    unsafe {
                        qemu_chr_fe_accept_input(addr_of!(self.char_backend) as *mut _);
                    }
                }
                result.into()
            }
        }
    }

    pub fn write(&self, offset: hwaddr, value: u64, _size: u32) {
        let mut update_irq = false;
        if let Ok(field) = RegisterOffset::try_from(offset) {
            // qemu_chr_fe_write_all() calls into the can_receive
            // callback, so handle writes before entering PL011Registers.
            if field == RegisterOffset::DR {
                // ??? Check if transmitter is enabled.
                let ch: u8 = value as u8;
                // SAFETY: char_backend is a valid CharBackend instance after it's been
                // initialized in realize().
                // XXX this blocks the entire thread. Rewrite to use
                // qemu_chr_fe_write and background I/O callbacks
                unsafe {
                    qemu_chr_fe_write_all(addr_of!(self.char_backend) as *mut _, &ch, 1);
                }
            }

            update_irq = self.regs.borrow_mut().write(
                field,
                value as u32,
                addr_of!(self.char_backend) as *mut _,
            );
        } else {
            eprintln!("write bad offset {offset} value {value}");
        }
        if update_irq {
            self.update();
        }
    }
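
    // Character-backend callbacks, registered in `realize()` through
    // `qemu_chr_fe_set_handlers` via the `pl011_can_receive`, `pl011_receive`
    // and `pl011_event` trampolines at the end of this file.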
    pub fn can_receive(&self) -> bool {
        // trace_pl011_can_receive(s->lcr, s->read_count, r);
        let regs = self.regs.borrow();
        regs.read_count < regs.fifo_depth()
    }

    pub fn receive(&self, ch: u32) {
        let mut regs = self.regs.borrow_mut();
        let update_irq = !regs.loopback_enabled() && regs.put_fifo(ch);
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update();
        }
    }

    pub fn event(&self, event: QEMUChrEvent) {
        let mut update_irq = false;
        let mut regs = self.regs.borrow_mut();
        if event == QEMUChrEvent::CHR_EVENT_BREAK && !regs.loopback_enabled() {
            update_irq = regs.put_fifo(registers::Data::BREAK.into());
        }
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update()
        }
    }

    pub fn realize(&self) {
        // SAFETY: self.char_backend has the correct size and alignment for a
        // CharBackend object, and its callbacks are of the correct types.
        unsafe {
            qemu_chr_fe_set_handlers(
                addr_of!(self.char_backend) as *mut CharBackend,
                Some(pl011_can_receive),
                Some(pl011_receive),
                Some(pl011_event),
                None,
                addr_of!(*self).cast::<c_void>() as *mut c_void,
                core::ptr::null_mut(),
                true,
            );
        }
    }

    pub fn reset_hold(&self, _type: ResetType) {
        self.regs.borrow_mut().reset();
    }

    pub fn update(&self) {
        let regs = self.regs.borrow();
        let flags = regs.int_level & regs.int_enabled;
        for (irq, i) in self.interrupts.iter().zip(IRQMASK) {
            irq.set(flags & i != 0);
        }
    }

    pub fn post_load(&self, _version_id: u32) -> Result<(), ()> {
        self.regs.borrow_mut().post_load()
    }
}

/// Which bits in the interrupt status matter for each outbound IRQ line?
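/// `update()` asserts IRQ line `n` whenever `int_level & int_enabled` shares a
/// bit with `IRQMASK[n]`; index 0 is the combined `UARTINTR` line, so e.g. a
/// pending, enabled receive interrupt raises both line 0 and line 1.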
const IRQMASK: [u32; 6] = [
    /* combined IRQ */
    Interrupt::E.0 | Interrupt::MS.0 | Interrupt::RT.0 | Interrupt::TX.0 | Interrupt::RX.0,
    Interrupt::RX.0,
    Interrupt::TX.0,
    Interrupt::RT.0,
    Interrupt::MS.0,
    Interrupt::E.0,
];

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device is
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_can_receive(opaque: *mut c_void) -> c_int {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().can_receive().into() }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device is
/// readable/writeable from one thread at any time.
///
/// The buffer and size arguments must also be valid.
pub unsafe extern "C" fn pl011_receive(opaque: *mut c_void, buf: *const u8, size: c_int) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe {
        if size > 0 {
            debug_assert!(!buf.is_null());
            state.as_ref().receive(u32::from(buf.read_volatile()));
        }
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device is
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_event(opaque: *mut c_void, event: QEMUChrEvent) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().event(event) }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer for `chr`
/// and `irq`.
#[no_mangle]
pub unsafe extern "C" fn pl011_create(
    addr: u64,
    irq: *mut IRQState,
    chr: *mut Chardev,
) -> *mut DeviceState {
    // SAFETY: The callers promise that they have owned references.
    // They do not gift them to pl011_create, so use `Owned::from`.
    let irq = unsafe { Owned::<IRQState>::from(&*irq) };
    let chr = unsafe { Owned::<Chardev>::from(&*chr) };

    let dev = PL011State::new();
    dev.prop_set_chr("chardev", &chr);
    dev.sysbus_realize();
    dev.mmio_map(0, addr);
    dev.connect_irq(0, &irq);

    // The pointer is kept alive by the QOM tree; drop the owned ref
    dev.as_mut_ptr()
}

#[repr(C)]
#[derive(qemu_api_macros::Object)]
/// PL011 Luminary device model.
pub struct PL011Luminary {
    parent_obj: ParentField<PL011State>,
}

impl ClassInitImpl<PL011Class> for PL011Luminary {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::LUMINARY;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object);

unsafe impl ObjectType for PL011Luminary {
    type Class = <PL011State as ObjectType>::Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY;
}

impl ObjectImpl for PL011Luminary {
    type ParentType = PL011State;
}

impl DeviceImpl for PL011Luminary {}
impl ResettablePhasesImpl for PL011Luminary {}
impl SysBusDeviceImpl for PL011Luminary {}