// Copyright 2024, Linaro Limited
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
// SPDX-License-Identifier: GPL-2.0-or-later

use core::ptr::{addr_of, addr_of_mut, NonNull};
use std::{
    ffi::CStr,
    os::raw::{c_int, c_void},
};

use qemu_api::{
    bindings::{
        error_fatal, qdev_prop_set_chr, qemu_chr_fe_accept_input, qemu_chr_fe_ioctl,
        qemu_chr_fe_set_handlers, qemu_chr_fe_write_all, qemu_irq, sysbus_connect_irq,
        sysbus_mmio_map, sysbus_realize, CharBackend, QEMUChrEvent,
        CHR_IOCTL_SERIAL_SET_BREAK,
    },
    chardev::Chardev,
    c_str, impl_vmstate_forward,
    irq::InterruptSource,
    memory::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder},
    prelude::*,
    qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl},
    qom::{ClassInitImpl, ObjectImpl, Owned, ParentField},
    sysbus::{SysBusDevice, SysBusDeviceClass},
    vmstate::VMStateDescription,
};

use crate::{
    device_class,
    registers::{self, Interrupt},
    RegisterOffset,
};

/// Integer Baud Rate Divider, `UARTIBRD`
const IBRD_MASK: u32 = 0xffff;

/// Fractional Baud Rate Divider, `UARTFBRD`
const FBRD_MASK: u32 = 0x3f;

/// QEMU sourced constant.
pub const PL011_FIFO_DEPTH: u32 = 16;

#[derive(Clone, Copy)]
struct DeviceId(&'static [u8; 8]);

impl std::ops::Index<hwaddr> for DeviceId {
    type Output = u8;

    fn index(&self, idx: hwaddr) -> &Self::Output {
        &self.0[idx as usize]
    }
}

impl DeviceId {
    const ARM: Self = Self(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
    const LUMINARY: Self = Self(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
}
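
// The eight `DeviceId` bytes are what the guest reads back, one byte per
// 32-bit register, from the identification registers at offsets
// 0xfe0..=0xffc; see the `Err(v)` arm of `PL011State::read()` below.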

// FIFOs use 32-bit indices instead of usize, for compatibility with
// the migration stream produced by the C version of this device.
#[repr(transparent)]
#[derive(Debug, Default)]
pub struct Fifo([registers::Data; PL011_FIFO_DEPTH as usize]);
impl_vmstate_forward!(Fifo);

impl Fifo {
    const fn len(&self) -> u32 {
        self.0.len() as u32
    }
}

impl std::ops::IndexMut<u32> for Fifo {
    fn index_mut(&mut self, idx: u32) -> &mut Self::Output {
        &mut self.0[idx as usize]
    }
}

impl std::ops::Index<u32> for Fifo {
    type Output = registers::Data;

    fn index(&self, idx: u32) -> &Self::Output {
        &self.0[idx as usize]
    }
}

#[repr(C)]
#[derive(Debug, Default, qemu_api_macros::offsets)]
pub struct PL011Registers {
    #[doc(alias = "fr")]
    pub flags: registers::Flags,
    #[doc(alias = "lcr")]
    pub line_control: registers::LineControl,
    #[doc(alias = "rsr")]
    pub receive_status_error_clear: registers::ReceiveStatusErrorClear,
    #[doc(alias = "cr")]
    pub control: registers::Control,
    pub dmacr: u32,
    pub int_enabled: u32,
    pub int_level: u32,
    pub read_fifo: Fifo,
    pub ilpr: u32,
    pub ibrd: u32,
    pub fbrd: u32,
    pub ifl: u32,
    pub read_pos: u32,
    pub read_count: u32,
    pub read_trigger: u32,
}

#[repr(C)]
#[derive(qemu_api_macros::Object, qemu_api_macros::offsets)]
/// PL011 Device Model in QEMU
pub struct PL011State {
    pub parent_obj: ParentField<SysBusDevice>,
    pub iomem: MemoryRegion,
    #[doc(alias = "chr")]
    pub char_backend: CharBackend,
    pub regs: BqlRefCell<PL011Registers>,
    /// QEMU interrupts
    ///
    /// ```text
    /// * sysbus MMIO region 0: device registers
    /// * sysbus IRQ 0: `UARTINTR` (combined interrupt line)
    /// * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line)
    /// * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line)
    /// * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line)
    /// * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line)
    /// * sysbus IRQ 5: `UARTEINTR` (error interrupt line)
    /// ```
    #[doc(alias = "irq")]
    pub interrupts: [InterruptSource; IRQMASK.len()],
    #[doc(alias = "clk")]
    pub clock: Owned<Clock>,
    #[doc(alias = "migrate_clk")]
    pub migrate_clock: bool,
}

qom_isa!(PL011State : SysBusDevice, DeviceState, Object);
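
// `PL011Class` extends the SysBusDevice class struct with a per-class device
// ID; `PL011State` and `PL011Luminary` each select theirs in `class_init()`
// (DeviceId::ARM vs. DeviceId::LUMINARY).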
#[repr(C)]
pub struct PL011Class {
    parent_class: <SysBusDevice as ObjectType>::Class,
    /// The byte string that identifies the device.
    device_id: DeviceId,
}

unsafe impl ObjectType for PL011State {
    type Class = PL011Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
}

impl ClassInitImpl<PL011Class> for PL011State {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::ARM;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

impl ObjectImpl for PL011State {
    type ParentType = SysBusDevice;

    const INSTANCE_INIT: Option<unsafe fn(&mut Self)> = Some(Self::init);
    const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
}

impl DeviceImpl for PL011State {
    fn properties() -> &'static [Property] {
        &device_class::PL011_PROPERTIES
    }
    fn vmsd() -> Option<&'static VMStateDescription> {
        Some(&device_class::VMSTATE_PL011)
    }
    const REALIZE: Option<fn(&Self)> = Some(Self::realize);
}

impl ResettablePhasesImpl for PL011State {
    const HOLD: Option<fn(&Self, ResetType)> = Some(Self::reset_hold);
}

impl PL011Registers {
    pub(self) fn read(&mut self, offset: RegisterOffset) -> (bool, u32) {
        use RegisterOffset::*;

        let mut update = false;
        let result = match offset {
            DR => {
                self.flags.set_receive_fifo_full(false);
                let c = self.read_fifo[self.read_pos];
                if self.read_count > 0 {
                    self.read_count -= 1;
                    self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1);
                }
                if self.read_count == 0 {
                    self.flags.set_receive_fifo_empty(true);
                }
                if self.read_count + 1 == self.read_trigger {
                    self.int_level &= !Interrupt::RX.0;
                }
                // Update error bits.
                self.receive_status_error_clear.set_from_data(c);
                // Must call qemu_chr_fe_accept_input
                update = true;
                u32::from(c)
            }
            RSR => u32::from(self.receive_status_error_clear),
            FR => u32::from(self.flags),
            FBRD => self.fbrd,
            ILPR => self.ilpr,
            IBRD => self.ibrd,
            LCR_H => u32::from(self.line_control),
            CR => u32::from(self.control),
            FLS => self.ifl,
            IMSC => self.int_enabled,
            RIS => self.int_level,
            MIS => self.int_level & self.int_enabled,
            ICR => {
                // "The UARTICR Register is the interrupt clear register and is write-only"
                // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR
                0
            }
            DMACR => self.dmacr,
        };
        (update, result)
    }
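
    // `write` returns true when the interrupt state may have changed;
    // `PL011State::write` then refreshes the outbound IRQ lines via `update()`.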
    pub(self) fn write(
        &mut self,
        offset: RegisterOffset,
        value: u32,
        char_backend: *mut CharBackend,
    ) -> bool {
        // eprintln!("write offset {offset} value {value}");
        use RegisterOffset::*;
        match offset {
            DR => {
                // interrupts always checked
                let _ = self.loopback_tx(value);
                self.int_level |= Interrupt::TX.0;
                return true;
            }
            RSR => {
                self.receive_status_error_clear = 0.into();
            }
            FR => {
                // flag writes are ignored
            }
            ILPR => {
                self.ilpr = value;
            }
            IBRD => {
                self.ibrd = value;
            }
            FBRD => {
                self.fbrd = value;
            }
            LCR_H => {
                let new_val: registers::LineControl = value.into();
                // Reset the FIFO state on FIFO enable or disable
                if self.line_control.fifos_enabled() != new_val.fifos_enabled() {
                    self.reset_rx_fifo();
                    self.reset_tx_fifo();
                }
                let update = (self.line_control.send_break() != new_val.send_break()) && {
                    let mut break_enable: c_int = new_val.send_break().into();
                    // SAFETY: self.char_backend is a valid CharBackend instance after it's been
                    // initialized in realize().
                    unsafe {
                        qemu_chr_fe_ioctl(
                            char_backend,
                            CHR_IOCTL_SERIAL_SET_BREAK as i32,
                            addr_of_mut!(break_enable).cast::<c_void>(),
                        );
                    }
                    self.loopback_break(break_enable > 0)
                };
                self.line_control = new_val;
                self.set_read_trigger();
                return update;
            }
            CR => {
                // ??? Need to implement the enable bit.
                self.control = value.into();
                return self.loopback_mdmctrl();
            }
            FLS => {
                self.ifl = value;
                self.set_read_trigger();
            }
            IMSC => {
                self.int_enabled = value;
                return true;
            }
            RIS => {}
            MIS => {}
            ICR => {
                self.int_level &= !value;
                return true;
            }
            DMACR => {
                self.dmacr = value;
                if value & 3 > 0 {
                    // qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n");
                    eprintln!("pl011: DMA not implemented");
                }
            }
        }
        false
    }
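
    // The loopback_* helpers emulate the CR.LBE loopback mode: DR writes,
    // BREAK conditions and modem-control outputs are routed straight back
    // into the receive/status path. Each returns true when the interrupt
    // state may have changed.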
    #[inline]
    #[must_use]
    fn loopback_tx(&mut self, value: u32) -> bool {
        // Caveat:
        //
        // In real hardware, TX loopback happens at the serial-bit level
        // and is then reassembled by the RX logic back into bytes and
        // placed into the RX fifo. That is, loopback happens after the
        // TX fifo.
        //
        // Because the real hardware TX fifo is time-drained at the frame
        // rate governed by the configured serial format, some loopback
        // bytes in the TX fifo may still be able to get into the RX fifo,
        // which could be full at times while being drained at software
        // pace.
        //
        // In such a scenario, the RX draining pace is the major factor
        // deciding which loopback bytes get into the RX fifo, unless
        // hardware flow-control is enabled.
        //
        // For simplicity, the behaviour described above is not emulated.
        self.loopback_enabled() && self.put_fifo(value)
    }

    #[must_use]
    fn loopback_mdmctrl(&mut self) -> bool {
        if !self.loopback_enabled() {
            return false;
        }

        /*
         * Loopback software-driven modem control outputs to modem status inputs:
         * FR.RI <= CR.Out2
         * FR.DCD <= CR.Out1
         * FR.CTS <= CR.RTS
         * FR.DSR <= CR.DTR
         *
         * The loopback happens immediately even if this call is triggered
         * by setting only CR.LBE.
         *
         * CTS/RTS updates due to enabled hardware flow controls are not
         * dealt with here.
         */

        self.flags.set_ring_indicator(self.control.out_2());
        self.flags.set_data_carrier_detect(self.control.out_1());
        self.flags.set_clear_to_send(self.control.request_to_send());
        self.flags
            .set_data_set_ready(self.control.data_transmit_ready());

        // Change interrupts based on updated FR
        let mut il = self.int_level;

        il &= !Interrupt::MS.0;

        if self.flags.data_set_ready() {
            il |= Interrupt::DSR.0;
        }
        if self.flags.data_carrier_detect() {
            il |= Interrupt::DCD.0;
        }
        if self.flags.clear_to_send() {
            il |= Interrupt::CTS.0;
        }
        if self.flags.ring_indicator() {
            il |= Interrupt::RI.0;
        }
        self.int_level = il;
        true
    }

    fn loopback_break(&mut self, enable: bool) -> bool {
        enable && self.loopback_tx(registers::Data::BREAK.into())
    }

    fn set_read_trigger(&mut self) {
        self.read_trigger = 1;
    }

    pub fn reset(&mut self) {
        self.line_control.reset();
        self.receive_status_error_clear.reset();
        self.dmacr = 0;
        self.int_enabled = 0;
        self.int_level = 0;
        self.ilpr = 0;
        self.ibrd = 0;
        self.fbrd = 0;
        self.read_trigger = 1;
        self.ifl = 0x12;
        self.control.reset();
        self.flags.reset();
        self.reset_rx_fifo();
        self.reset_tx_fifo();
    }

    pub fn reset_rx_fifo(&mut self) {
        self.read_count = 0;
        self.read_pos = 0;

        // Reset FIFO flags
        self.flags.set_receive_fifo_full(false);
        self.flags.set_receive_fifo_empty(true);
    }

    pub fn reset_tx_fifo(&mut self) {
        // Reset FIFO flags
        self.flags.set_transmit_fifo_full(false);
        self.flags.set_transmit_fifo_empty(true);
    }

    #[inline]
    pub fn fifo_enabled(&self) -> bool {
        self.line_control.fifos_enabled() == registers::Mode::FIFO
    }

    #[inline]
    pub fn loopback_enabled(&self) -> bool {
        self.control.enable_loopback()
    }

    #[inline]
    pub fn fifo_depth(&self) -> u32 {
        // Note: FIFO depth is expected to be power-of-2
        if self.fifo_enabled() {
            return PL011_FIFO_DEPTH;
        }
        1
    }
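
    // The RX FIFO is a circular buffer: `read_pos` is the oldest element,
    // `read_count` the number of valid entries, and the next free slot is
    // `(read_pos + read_count) & (depth - 1)` (the depth is 1 or
    // PL011_FIFO_DEPTH, both powers of two). For example, with depth 16,
    // read_pos == 14 and read_count == 3, the next byte lands in slot
    // (14 + 3) & 15 == 1.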
    #[must_use]
    pub fn put_fifo(&mut self, value: u32) -> bool {
        let depth = self.fifo_depth();
        assert!(depth > 0);
        let slot = (self.read_pos + self.read_count) & (depth - 1);
        self.read_fifo[slot] = registers::Data::from(value);
        self.read_count += 1;
        self.flags.set_receive_fifo_empty(false);
        if self.read_count == depth {
            self.flags.set_receive_fifo_full(true);
        }

        if self.read_count == self.read_trigger {
            self.int_level |= Interrupt::RX.0;
            return true;
        }
        false
    }

    pub fn post_load(&mut self) -> Result<(), ()> {
        /* Sanity-check input state */
        if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() {
            return Err(());
        }

        if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 {
            // Older versions of PL011 didn't ensure that the single
            // character in the FIFO in FIFO-disabled mode is in
            // element 0 of the array; convert to follow the current
            // code's assumptions.
            self.read_fifo[0] = self.read_fifo[self.read_pos];
            self.read_pos = 0;
        }

        self.ibrd &= IBRD_MASK;
        self.fbrd &= FBRD_MASK;

        Ok(())
    }
}

impl PL011State {
    /// Initializes a pre-allocated, uninitialized instance of `PL011State`.
    ///
    /// # Safety
    ///
    /// `self` must point to a correctly sized and aligned location for the
    /// `PL011State` type. It must not be called more than once on the same
    /// location/instance. All its fields are expected to hold uninitialized
    /// values with the sole exception of `parent_obj`.
    unsafe fn init(&mut self) {
        static PL011_OPS: MemoryRegionOps<PL011State> = MemoryRegionOpsBuilder::<PL011State>::new()
            .read(&PL011State::read)
            .write(&PL011State::write)
            .native_endian()
            .impl_sizes(4, 4)
            .build();

        // SAFETY:
        //
        // self and self.iomem are guaranteed to be valid at this point since callers
        // must make sure the `self` reference is valid.
        MemoryRegion::init_io(
            unsafe { &mut *addr_of_mut!(self.iomem) },
            addr_of_mut!(*self),
            &PL011_OPS,
            "pl011",
            0x1000,
        );

        self.regs = Default::default();

        // SAFETY:
        //
        // self.clock is not initialized at this point; but since `Owned<_>` is
        // not Drop, we can overwrite the undefined value without side effects.
        // It's not fully sound, but because all PL011State instances are
        // created by QOM code which calls this function to initialize the
        // fields, at least no code is able to access an invalid self.clock
        // value.
        self.clock = self.init_clock_in("clk", &Self::clock_update, ClockEvent::ClockUpdate);
    }

    const fn clock_update(&self, _event: ClockEvent) {
        /* pl011_trace_baudrate_change(s); */
    }

    fn post_init(&self) {
        self.init_mmio(&self.iomem);
        for irq in self.interrupts.iter() {
            self.init_irq(irq);
        }
    }

    pub fn read(&self, offset: hwaddr, _size: u32) -> u64 {
        match RegisterOffset::try_from(offset) {
            Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => {
                let device_id = self.get_class().device_id;
                u64::from(device_id[(offset - 0xfe0) >> 2])
            }
            Err(_) => {
                // qemu_log_mask(LOG_GUEST_ERROR, "pl011_read: Bad offset 0x%x\n", (int)offset);
                0
            }
            Ok(field) => {
                let (update_irq, result) = self.regs.borrow_mut().read(field);
                if update_irq {
                    self.update();
                    // SAFETY: self.char_backend is a valid CharBackend instance after it's
                    // been initialized in realize().
                    unsafe {
                        qemu_chr_fe_accept_input(addr_of!(self.char_backend) as *mut _);
                    }
                }
                result.into()
            }
        }
    }

    pub fn write(&self, offset: hwaddr, value: u64, _size: u32) {
        let mut update_irq = false;
        if let Ok(field) = RegisterOffset::try_from(offset) {
            // qemu_chr_fe_write_all() calls into the can_receive
            // callback, so handle writes before entering PL011Registers.
            if field == RegisterOffset::DR {
                // ??? Check if transmitter is enabled.
                let ch: u8 = value as u8;
                // SAFETY: char_backend is a valid CharBackend instance after it's been
                // initialized in realize().
                // XXX this blocks the entire thread. Rewrite to use
                // qemu_chr_fe_write and background I/O callbacks
                unsafe {
                    qemu_chr_fe_write_all(addr_of!(self.char_backend) as *mut _, &ch, 1);
                }
            }

            update_irq = self.regs.borrow_mut().write(
                field,
                value as u32,
                addr_of!(self.char_backend) as *mut _,
            );
        } else {
            eprintln!("write bad offset {offset} value {value}");
        }
        if update_irq {
            self.update();
        }
    }
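
    // The chardev layer calls back into the next three methods through the
    // extern "C" trampolines defined at the bottom of this file, which are
    // registered with qemu_chr_fe_set_handlers() in realize().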
    pub fn can_receive(&self) -> bool {
        // trace_pl011_can_receive(s->lcr, s->read_count, r);
        let regs = self.regs.borrow();
        regs.read_count < regs.fifo_depth()
    }

    pub fn receive(&self, ch: u32) {
        let mut regs = self.regs.borrow_mut();
        let update_irq = !regs.loopback_enabled() && regs.put_fifo(ch);
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update();
        }
    }

    pub fn event(&self, event: QEMUChrEvent) {
        let mut update_irq = false;
        let mut regs = self.regs.borrow_mut();
        if event == QEMUChrEvent::CHR_EVENT_BREAK && !regs.loopback_enabled() {
            update_irq = regs.put_fifo(registers::Data::BREAK.into());
        }
        // Release the BqlRefCell before calling self.update()
        drop(regs);

        if update_irq {
            self.update()
        }
    }

    pub fn realize(&self) {
        // SAFETY: self.char_backend has the correct size and alignment for a
        // CharBackend object, and its callbacks are of the correct types.
        unsafe {
            qemu_chr_fe_set_handlers(
                addr_of!(self.char_backend) as *mut CharBackend,
                Some(pl011_can_receive),
                Some(pl011_receive),
                Some(pl011_event),
                None,
                addr_of!(*self).cast::<c_void>() as *mut c_void,
                core::ptr::null_mut(),
                true,
            );
        }
    }

    pub fn reset_hold(&self, _type: ResetType) {
        self.regs.borrow_mut().reset();
    }

    pub fn update(&self) {
        let regs = self.regs.borrow();
        let flags = regs.int_level & regs.int_enabled;
        for (irq, i) in self.interrupts.iter().zip(IRQMASK) {
            irq.set(flags & i != 0);
        }
    }

    pub fn post_load(&self, _version_id: u32) -> Result<(), ()> {
        self.regs.borrow_mut().post_load()
    }
}
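
// Worked example: if `int_level` has only the RX bit pending and `IMSC`
// (int_enabled) unmasks RX, then `update()` asserts sysbus IRQ 0 (the
// combined UARTINTR, whose mask includes RX) and IRQ 1 (UARTRXINTR), and
// deasserts the remaining four lines.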
/// Which bits in the interrupt status matter for each outbound IRQ line?
const IRQMASK: [u32; 6] = [
    /* combined IRQ */
    Interrupt::E.0 | Interrupt::MS.0 | Interrupt::RT.0 | Interrupt::TX.0 | Interrupt::RX.0,
    Interrupt::RX.0,
    Interrupt::TX.0,
    Interrupt::RT.0,
    Interrupt::MS.0,
    Interrupt::E.0,
];

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer, that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from only one thread at any time.
pub unsafe extern "C" fn pl011_can_receive(opaque: *mut c_void) -> c_int {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().can_receive().into() }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer, that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from only one thread at any time.
///
/// The buffer and size arguments must also be valid.
pub unsafe extern "C" fn pl011_receive(opaque: *mut c_void, buf: *const u8, size: c_int) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe {
        if size > 0 {
            debug_assert!(!buf.is_null());
            state.as_ref().receive(u32::from(buf.read_volatile()));
        }
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer, that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from only one thread at any time.
pub unsafe extern "C" fn pl011_event(opaque: *mut c_void, event: QEMUChrEvent) {
    let state = NonNull::new(opaque).unwrap().cast::<PL011State>();
    unsafe { state.as_ref().event(event) }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer for `chr`.
#[no_mangle]
pub unsafe extern "C" fn pl011_create(
    addr: u64,
    irq: qemu_irq,
    chr: *mut Chardev,
) -> *mut DeviceState {
    let pl011 = PL011State::new();
    unsafe {
        let dev = pl011.as_mut_ptr::<DeviceState>();
        qdev_prop_set_chr(dev, c_str!("chardev").as_ptr(), chr);

        let sysbus = pl011.as_mut_ptr::<SysBusDevice>();
        sysbus_realize(sysbus, addr_of_mut!(error_fatal));
        sysbus_mmio_map(sysbus, 0, addr);
        sysbus_connect_irq(sysbus, 0, irq);

        // return the pointer, which is kept alive by the QOM tree; drop owned ref
        pl011.as_mut_ptr()
    }
}

#[repr(C)]
#[derive(qemu_api_macros::Object)]
/// PL011 Luminary device model.
pub struct PL011Luminary {
    parent_obj: ParentField<PL011State>,
}

impl ClassInitImpl<PL011Class> for PL011Luminary {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::LUMINARY;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object);

unsafe impl ObjectType for PL011Luminary {
    type Class = <PL011State as ObjectType>::Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY;
}

impl ObjectImpl for PL011Luminary {
    type ParentType = PL011State;
}

impl DeviceImpl for PL011Luminary {}
impl ResettablePhasesImpl for PL011Luminary {}
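
// `pl011_create` above is the C-callable constructor intended for board code.
// A minimal usage sketch (assuming C board code with a `base` address, an
// `irq` line and a backing chardev such as `serial_hd(0)` at hand):
//
//     DeviceState *uart = pl011_create(base, irq, serial_hd(0));
//
// The returned device is already realized, mapped at `base` and wired to the
// combined interrupt line.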