// Copyright 2024, Linaro Limited
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
// SPDX-License-Identifier: GPL-2.0-or-later

use std::{
    ffi::CStr,
    os::raw::{c_int, c_void},
    ptr::{addr_of, addr_of_mut, NonNull},
};

use qemu_api::{
    bindings::{
        qemu_chr_fe_accept_input, qemu_chr_fe_ioctl, qemu_chr_fe_set_handlers,
        qemu_chr_fe_write_all, CharBackend, QEMUChrEvent, CHR_IOCTL_SERIAL_SET_BREAK,
    },
    chardev::Chardev,
    impl_vmstate_forward,
    irq::{IRQState, InterruptSource},
    memory::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder},
    prelude::*,
    qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl},
    qom::{ObjectImpl, Owned, ParentField},
    sysbus::{SysBusDevice, SysBusDeviceImpl},
    vmstate::VMStateDescription,
};

use crate::{
    device_class,
    registers::{self, Interrupt, RegisterOffset},
};

// TODO: You must disable the UART before any of the control registers are
// reprogrammed. When the UART is disabled in the middle of transmission or
// reception, it completes the current character before stopping.

/// Integer Baud Rate Divider, `UARTIBRD`
const IBRD_MASK: u32 = 0xffff;

/// Fractional Baud Rate Divider, `UARTFBRD`
const FBRD_MASK: u32 = 0x3f;

/// QEMU sourced constant.
pub const PL011_FIFO_DEPTH: u32 = 16;

#[derive(Clone, Copy)]
struct DeviceId(&'static [u8; 8]);

impl std::ops::Index<hwaddr> for DeviceId {
    type Output = u8;

    fn index(&self, idx: hwaddr) -> &Self::Output {
        &self.0[idx as usize]
    }
}

// FIFOs use 32-bit indices instead of usize, for compatibility with
// the migration stream produced by the C version of this device.
#[repr(transparent)]
#[derive(Debug, Default)]
pub struct Fifo([registers::Data; PL011_FIFO_DEPTH as usize]);
impl_vmstate_forward!(Fifo);

impl Fifo {
    const fn len(&self) -> u32 {
        self.0.len() as u32
    }
}

impl std::ops::IndexMut<u32> for Fifo {
    fn index_mut(&mut self, idx: u32) -> &mut Self::Output {
        &mut self.0[idx as usize]
    }
}

impl std::ops::Index<u32> for Fifo {
    type Output = registers::Data;

    fn index(&self, idx: u32) -> &Self::Output {
        &self.0[idx as usize]
    }
}

#[repr(C)]
#[derive(Debug, Default, qemu_api_macros::offsets)]
pub struct PL011Registers {
    #[doc(alias = "fr")]
    pub flags: registers::Flags,
    #[doc(alias = "lcr")]
    pub line_control: registers::LineControl,
    #[doc(alias = "rsr")]
    pub receive_status_error_clear: registers::ReceiveStatusErrorClear,
    #[doc(alias = "cr")]
    pub control: registers::Control,
    pub dmacr: u32,
    pub int_enabled: u32,
    pub int_level: u32,
    pub read_fifo: Fifo,
    pub ilpr: u32,
    pub ibrd: u32,
    pub fbrd: u32,
    pub ifl: u32,
    pub read_pos: u32,
    pub read_count: u32,
    pub read_trigger: u32,
}

#[repr(C)]
#[derive(qemu_api_macros::Object, qemu_api_macros::offsets)]
/// PL011 Device Model in QEMU
pub struct PL011State {
    pub parent_obj: ParentField<SysBusDevice>,
    pub iomem: MemoryRegion,
    #[doc(alias = "chr")]
    pub char_backend: CharBackend,
    pub regs: BqlRefCell<PL011Registers>,
    /// QEMU interrupts
    ///
    /// ```text
    /// * sysbus MMIO region 0: device registers
    /// * sysbus IRQ 0: `UARTINTR` (combined interrupt line)
    /// * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line)
    /// * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line)
    /// * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line)
    /// * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line)
    /// * sysbus IRQ 5: `UARTEINTR` (error interrupt line)
    /// ```
    #[doc(alias = "irq")]
    pub interrupts: [InterruptSource; IRQMASK.len()],
    #[doc(alias = "clk")]
    pub clock: Owned<Clock>,
    #[doc(alias = "migrate_clk")]
    pub migrate_clock: bool,
}

qom_isa!(PL011State : SysBusDevice, DeviceState, Object);

#[repr(C)]
pub struct PL011Class {
    parent_class: <SysBusDevice as ObjectType>::Class,
    /// The byte string that identifies the device.
    device_id: DeviceId,
}

trait PL011Impl: SysBusDeviceImpl + IsA<PL011State> {
    const DEVICE_ID: DeviceId;
}

impl PL011Class {
    fn class_init<T: PL011Impl>(&mut self) {
        self.device_id = T::DEVICE_ID;
        self.parent_class.class_init::<T>();
    }
}

unsafe impl ObjectType for PL011State {
    type Class = PL011Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
}

impl PL011Impl for PL011State {
    const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
}

impl ObjectImpl for PL011State {
    type ParentType = SysBusDevice;

    const INSTANCE_INIT: Option<unsafe fn(&mut Self)> = Some(Self::init);
    const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
    const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
}

impl DeviceImpl for PL011State {
    fn properties() -> &'static [Property] {
        &device_class::PL011_PROPERTIES
    }
    fn vmsd() -> Option<&'static VMStateDescription> {
        Some(&device_class::VMSTATE_PL011)
    }
    const REALIZE: Option<fn(&Self)> = Some(Self::realize);
}

impl ResettablePhasesImpl for PL011State {
    const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
}

impl SysBusDeviceImpl for PL011State {}

impl PL011Registers {
    pub(self) fn read(&mut self, offset: RegisterOffset) -> (bool, u32) {
        use RegisterOffset::*;

        let mut update = false;
        let result = match offset {
            DR => {
                self.flags.set_receive_fifo_full(false);
                let c = self.read_fifo[self.read_pos];
                if self.read_count > 0 {
                    self.read_count -= 1;
                    self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1);
                }
                if self.read_count == 0 {
                    self.flags.set_receive_fifo_empty(true);
                }
                if self.read_count + 1 == self.read_trigger {
                    self.int_level &= !Interrupt::RX.0;
                }
                // Update error bits.
                self.receive_status_error_clear.set_from_data(c);
                // Must call qemu_chr_fe_accept_input
                update = true;
                u32::from(c)
            }
            RSR => u32::from(self.receive_status_error_clear),
            FR => u32::from(self.flags),
            FBRD => self.fbrd,
            ILPR => self.ilpr,
            IBRD => self.ibrd,
            LCR_H => u32::from(self.line_control),
            CR => u32::from(self.control),
            FLS => self.ifl,
            IMSC => self.int_enabled,
            RIS => self.int_level,
            MIS => self.int_level & self.int_enabled,
            ICR => {
                // "The UARTICR Register is the interrupt clear register and is write-only"
                // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR
                0
            }
            DMACR => self.dmacr,
        };
        (update, result)
    }

    pub(self) fn write(
        &mut self,
        offset: RegisterOffset,
        value: u32,
        char_backend: *mut CharBackend,
    ) -> bool {
        // eprintln!("write offset {offset} value {value}");
        use RegisterOffset::*;
        match offset {
            DR => {
                // interrupts always checked
                let _ = self.loopback_tx(value);
                self.int_level |= Interrupt::TX.0;
                return true;
            }
            RSR => {
                self.receive_status_error_clear = 0.into();
            }
            FR => {
                // flag writes are ignored
            }
            ILPR => {
                self.ilpr = value;
            }
            IBRD => {
                self.ibrd = value;
            }
            FBRD => {
                self.fbrd = value;
            }
            LCR_H => {
                let new_val: registers::LineControl = value.into();
                // Reset the FIFO state on FIFO enable or disable
                if self.line_control.fifos_enabled() != new_val.fifos_enabled() {
                    self.reset_rx_fifo();
                    self.reset_tx_fifo();
                }
                let update = (self.line_control.send_break() != new_val.send_break()) && {
                    let mut break_enable: c_int = new_val.send_break().into();
                    // SAFETY: self.char_backend is a valid CharBackend instance after it's been
                    // initialized in realize().
                    unsafe {
                        qemu_chr_fe_ioctl(
                            char_backend,
                            CHR_IOCTL_SERIAL_SET_BREAK as i32,
                            addr_of_mut!(break_enable).cast::<c_void>(),
                        );
                    }
                    self.loopback_break(break_enable > 0)
                };
                self.line_control = new_val;
                self.set_read_trigger();
                return update;
            }
            CR => {
                // ??? Need to implement the enable bit.
                self.control = value.into();
                return self.loopback_mdmctrl();
            }
            FLS => {
                self.ifl = value;
                self.set_read_trigger();
            }
            IMSC => {
                self.int_enabled = value;
                return true;
            }
            RIS => {}
            MIS => {}
            ICR => {
                self.int_level &= !value;
                return true;
            }
            DMACR => {
                self.dmacr = value;
                if value & 3 > 0 {
                    // qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n");
                    eprintln!("pl011: DMA not implemented");
                }
            }
        }
        false
    }

    #[inline]
    #[must_use]
    fn loopback_tx(&mut self, value: u32) -> bool {
        // Caveat:
        //
        // In real hardware, TX loopback happens at the serial-bit level
        // and is then reassembled by the RX logic back into bytes and placed
        // into the RX fifo. That is, loopback happens after the TX fifo.
        //
        // Because the real hardware TX fifo is time-drained at the frame
        // rate governed by the configured serial format, some loopback
        // bytes in the TX fifo may still be able to get into the RX fifo
        // that could be full at times while being drained at software
        // pace.
        //
        // In such a scenario, the RX draining pace is the major factor
        // deciding which loopback bytes get into the RX fifo, unless
        // hardware flow-control is enabled.
        //
        // For simplicity, the behavior described above is not emulated.
        self.loopback_enabled() && self.put_fifo(value)
    }

    #[must_use]
    fn loopback_mdmctrl(&mut self) -> bool {
        if !self.loopback_enabled() {
            return false;
        }

        /*
         * Loopback software-driven modem control outputs to modem status inputs:
         *   FR.RI  <= CR.Out2
         *   FR.DCD <= CR.Out1
         *   FR.CTS <= CR.RTS
         *   FR.DSR <= CR.DTR
         *
         * The loopback happens immediately even if this call is triggered
         * by setting only CR.LBE.
         *
         * CTS/RTS updates due to enabled hardware flow controls are not
         * dealt with here.
         */

        self.flags.set_ring_indicator(self.control.out_2());
        self.flags.set_data_carrier_detect(self.control.out_1());
        self.flags.set_clear_to_send(self.control.request_to_send());
        self.flags
            .set_data_set_ready(self.control.data_transmit_ready());

        // Change interrupts based on updated FR
        let mut il = self.int_level;

        il &= !Interrupt::MS.0;

        if self.flags.data_set_ready() {
            il |= Interrupt::DSR.0;
        }
        if self.flags.data_carrier_detect() {
            il |= Interrupt::DCD.0;
        }
        if self.flags.clear_to_send() {
            il |= Interrupt::CTS.0;
        }
        if self.flags.ring_indicator() {
            il |= Interrupt::RI.0;
        }
        self.int_level = il;
        true
    }

    fn loopback_break(&mut self, enable: bool) -> bool {
        enable && self.loopback_tx(registers::Data::BREAK.into())
    }

    fn set_read_trigger(&mut self) {
        self.read_trigger = 1;
    }

    pub fn reset(&mut self) {
        self.line_control.reset();
        self.receive_status_error_clear.reset();
        self.dmacr = 0;
        self.int_enabled = 0;
        self.int_level = 0;
        self.ilpr = 0;
        self.ibrd = 0;
        self.fbrd = 0;
        self.read_trigger = 1;
        self.ifl = 0x12;
        self.control.reset();
        self.flags.reset();
        self.reset_rx_fifo();
        self.reset_tx_fifo();
    }

    pub fn reset_rx_fifo(&mut self) {
        self.read_count = 0;
        self.read_pos = 0;

        // Reset FIFO flags
        self.flags.set_receive_fifo_full(false);
        self.flags.set_receive_fifo_empty(true);
    }

    pub fn reset_tx_fifo(&mut self) {
        // Reset FIFO flags
        self.flags.set_transmit_fifo_full(false);
        self.flags.set_transmit_fifo_empty(true);
    }

    #[inline]
    pub fn fifo_enabled(&self) -> bool {
        self.line_control.fifos_enabled() == registers::Mode::FIFO
    }

    #[inline]
    pub fn loopback_enabled(&self) -> bool {
        self.control.enable_loopback()
    }

    #[inline]
    pub fn fifo_depth(&self) -> u32 {
        // Note: FIFO depth is expected to be power-of-2
        if self.fifo_enabled() {
            return PL011_FIFO_DEPTH;
        }
        1
    }

    #[must_use]
    pub fn put_fifo(&mut self, value: u32) -> bool {
        let depth = self.fifo_depth();
        assert!(depth > 0);
        let slot = (self.read_pos + self.read_count) & (depth - 1);
        self.read_fifo[slot] = registers::Data::from(value);
        self.read_count += 1;
        self.flags.set_receive_fifo_empty(false);
        if self.read_count == depth {
            self.flags.set_receive_fifo_full(true);
        }

        if self.read_count == self.read_trigger {
            self.int_level |= Interrupt::RX.0;
            return true;
        }
        false
    }

    pub fn post_load(&mut self) -> Result<(), ()> {
        /* Sanity-check input state */
        if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() {
            return Err(());
        }

        if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 {
            // Older versions of PL011 didn't ensure that the single
            // character in the FIFO in FIFO-disabled mode is in
            // element 0 of the array; convert to follow the current
            // code's assumptions.
            self.read_fifo[0] = self.read_fifo[self.read_pos];
            self.read_pos = 0;
        }

        self.ibrd &= IBRD_MASK;
        self.fbrd &= FBRD_MASK;

        Ok(())
    }
}

impl PL011State {
    /// Initializes a pre-allocated, uninitialized instance of `PL011State`.
    ///
    /// # Safety
    ///
    /// `self` must point to a correctly sized and aligned location for the
    /// `PL011State` type. It must not be called more than once on the same
    /// location/instance. All its fields are expected to hold uninitialized
    /// values with the sole exception of `parent_obj`.
    unsafe fn init(&mut self) {
        static PL011_OPS: MemoryRegionOps<PL011State> = MemoryRegionOpsBuilder::<PL011State>::new()
            .read(&PL011State::read)
            .write(&PL011State::write)
            .native_endian()
            .impl_sizes(4, 4)
            .build();

        // SAFETY:
        //
        // self and self.iomem are guaranteed to be valid at this point since callers
        // must make sure the `self` reference is valid.
        MemoryRegion::init_io(
            unsafe { &mut *addr_of_mut!(self.iomem) },
            addr_of_mut!(*self),
            &PL011_OPS,
            "pl011",
            0x1000,
        );

        self.regs = Default::default();

        // SAFETY:
        //
        // self.clock is not initialized at this point; but since `Owned<_>` is
        // not Drop, we can overwrite the undefined value without side effects;
        // it's not sound, but because all PL011State instances are created
        // by QOM code which calls this function to initialize the fields, at
        // least no code is able to access an invalid self.clock value.
        self.clock = self.init_clock_in("clk", &Self::clock_update, ClockEvent::ClockUpdate);
    }

    const fn clock_update(&self, _event: ClockEvent) {
        /* pl011_trace_baudrate_change(s); */
    }

    fn post_init(&self) {
        self.init_mmio(&self.iomem);
        for irq in self.interrupts.iter() {
            self.init_irq(irq);
        }
    }

    fn read(&self, offset: hwaddr, _size: u32) -> u64 {
        match RegisterOffset::try_from(offset) {
            Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => {
                let device_id = self.get_class().device_id;
                u64::from(device_id[(offset - 0xfe0) >> 2])
            }
            Err(_) => {
                // qemu_log_mask(LOG_GUEST_ERROR, "pl011_read: Bad offset 0x%x\n", (int)offset);
                0
            }
            Ok(field) => {
                let (update_irq, result) = self.regs.borrow_mut().read(field);
                if update_irq {
                    self.update();
                    unsafe {
                        qemu_chr_fe_accept_input(addr_of!(self.char_backend) as *mut _);
                    }
                }
                result.into()
            }
        }
    }

    fn write(&self, offset: hwaddr, value: u64, _size: u32) {
        let mut update_irq = false;
        if let Ok(field) = RegisterOffset::try_from(offset) {
            // qemu_chr_fe_write_all() calls into the can_receive
            // callback, so handle writes before entering PL011Registers.
            if field == RegisterOffset::DR {
                // ??? Check if transmitter is enabled.
                let ch: u8 = value as u8;
                // SAFETY: char_backend is a valid CharBackend instance after it's been
                // initialized in realize().
                // XXX this blocks the entire thread. Rewrite to use
                // qemu_chr_fe_write and background I/O callbacks
                unsafe {
                    qemu_chr_fe_write_all(addr_of!(self.char_backend) as *mut _, &ch, 1);
                }
            }

            update_irq = self.regs.borrow_mut().write(
                field,
                value as u32,
                addr_of!(self.char_backend) as *mut _,
            );
        } else {
            eprintln!("write bad offset {offset} value {value}");
        }
        if update_irq {
            self.update();
        }
    }

    pub fn can_receive(&self) -> bool {
        // trace_pl011_can_receive(s->lcr, s->read_count, r);
        let regs = self.regs.borrow();
        regs.read_count < regs.fifo_depth()
    }

    pub fn receive(&self, ch: u32) {
        let mut regs = self.regs.borrow_mut();
        let update_irq = !regs.loopback_enabled() && regs.put_fifo(ch);
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update();
        }
    }

    pub fn event(&self, event: QEMUChrEvent) {
        let mut update_irq = false;
        let mut regs = self.regs.borrow_mut();
        if event == QEMUChrEvent::CHR_EVENT_BREAK && !regs.loopback_enabled() {
            update_irq = regs.put_fifo(registers::Data::BREAK.into());
        }
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update()
        }
    }

    fn realize(&self) {
        // SAFETY: self.char_backend has the correct size and alignment for a
        // CharBackend object, and its callbacks are of the correct types.
        unsafe {
            qemu_chr_fe_set_handlers(
                addr_of!(self.char_backend) as *mut CharBackend,
                Some(pl011_can_receive),
                Some(pl011_receive),
                Some(pl011_event),
                None,
                addr_of!(*self).cast::<c_void>() as *mut c_void,
                core::ptr::null_mut(),
                true,
            );
        }
    }

    fn reset_hold(&self, _type: ResetType) {
        self.regs.borrow_mut().reset();
    }

    fn update(&self) {
        let regs = self.regs.borrow();
        let flags = regs.int_level & regs.int_enabled;
        for (irq, i) in self.interrupts.iter().zip(IRQMASK) {
            irq.set(flags & i != 0);
        }
    }

    pub fn post_load(&self, _version_id: u32) -> Result<(), ()> {
        self.regs.borrow_mut().post_load()
    }
}

/// Which bits in the interrupt status matter for each outbound IRQ line?
const IRQMASK: [u32; 6] = [
    /* combined IRQ */
    Interrupt::E.0 | Interrupt::MS.0 | Interrupt::RT.0 | Interrupt::TX.0 | Interrupt::RX.0,
    Interrupt::RX.0,
    Interrupt::TX.0,
    Interrupt::RT.0,
    Interrupt::MS.0,
    Interrupt::E.0,
];

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_can_receive(opaque: *mut c_void) -> c_int {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().can_receive().into() }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
///
/// The buffer and size arguments must also be valid.
pub unsafe extern "C" fn pl011_receive(opaque: *mut c_void, buf: *const u8, size: c_int) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe {
        if size > 0 {
            debug_assert!(!buf.is_null());
            state.as_ref().receive(u32::from(buf.read_volatile()));
        }
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from one thread at any time.
pub unsafe extern "C" fn pl011_event(opaque: *mut c_void, event: QEMUChrEvent) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().event(event) }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer for `chr`
/// and `irq`.
#[no_mangle]
pub unsafe extern "C" fn pl011_create(
    addr: u64,
    irq: *mut IRQState,
    chr: *mut Chardev,
) -> *mut DeviceState {
    // SAFETY: The callers promise that they have owned references.
    // They do not gift them to pl011_create, so use `Owned::from`.
    let irq = unsafe { Owned::<IRQState>::from(&*irq) };
    let chr = unsafe { Owned::<Chardev>::from(&*chr) };

    let dev = PL011State::new();
    dev.prop_set_chr("chardev", &chr);
    dev.sysbus_realize();
    dev.mmio_map(0, addr);
    dev.connect_irq(0, &irq);

    // The pointer is kept alive by the QOM tree; drop the owned ref
    dev.as_mut_ptr()
}

#[repr(C)]
#[derive(qemu_api_macros::Object)]
/// PL011 Luminary device model.
pub struct PL011Luminary {
    parent_obj: ParentField<PL011State>,
}

qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object);

unsafe impl ObjectType for PL011Luminary {
    type Class = <PL011State as ObjectType>::Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY;
}

impl ObjectImpl for PL011Luminary {
    type ParentType = PL011State;

    const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::<Self>;
}

impl PL011Impl for PL011Luminary {
    const DEVICE_ID: DeviceId = DeviceId(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
}

impl DeviceImpl for PL011Luminary {}
impl ResettablePhasesImpl for PL011Luminary {}
impl SysBusDeviceImpl for PL011Luminary {}