// Copyright 2024, Linaro Limited
// Author(s): Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
// SPDX-License-Identifier: GPL-2.0-or-later

use core::ptr::{addr_of, addr_of_mut, NonNull};
use std::{
    ffi::CStr,
    os::raw::{c_int, c_uint, c_void},
};

use qemu_api::{
    bindings::{
        error_fatal, hwaddr, memory_region_init_io, qdev_init_clock_in, qdev_new,
        qdev_prop_set_chr, qemu_chr_fe_ioctl, qemu_chr_fe_set_handlers, qemu_chr_fe_write_all,
        qemu_irq, sysbus_connect_irq, sysbus_mmio_map, sysbus_realize_and_unref, CharBackend,
        Chardev, Clock, ClockEvent, MemoryRegion, QEMUChrEvent, CHR_IOCTL_SERIAL_SET_BREAK,
    },
    c_str,
    irq::InterruptSource,
    prelude::*,
    qdev::{DeviceImpl, DeviceState, Property},
    qom::{ClassInitImpl, ObjectImpl, ParentField},
    sysbus::{SysBusDevice, SysBusDeviceClass},
    vmstate::VMStateDescription,
};

use crate::{
    device_class,
    memory_ops::PL011_OPS,
    registers::{self, Interrupt},
    RegisterOffset,
};

/// Integer Baud Rate Divider, `UARTIBRD`
const IBRD_MASK: u32 = 0xffff;

/// Fractional Baud Rate Divider, `UARTFBRD`
const FBRD_MASK: u32 = 0x3f;

/// QEMU sourced constant.
pub const PL011_FIFO_DEPTH: u32 = 16;

#[derive(Clone, Copy)]
struct DeviceId(&'static [u8; 8]);

impl std::ops::Index<hwaddr> for DeviceId {
    type Output = u8;

    fn index(&self, idx: hwaddr) -> &Self::Output {
        &self.0[idx as usize]
    }
}

impl DeviceId {
    const ARM: Self = Self(&[0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1]);
    const LUMINARY: Self = Self(&[0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1]);
}

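// The eight bytes above are what a guest reads back, one byte per 32-bit
// register, from the identification window at offsets 0xFE0..=0xFFC
// (`UARTPeriphID0`..`UARTPCellID3`); see the first `Err` arm of `read()` below.
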
// FIFOs use 32-bit indices instead of usize, for compatibility with
// the migration stream produced by the C version of this device.
#[repr(transparent)]
#[derive(Debug, Default)]
pub struct Fifo([registers::Data; PL011_FIFO_DEPTH as usize]);

impl Fifo {
    const fn len(&self) -> u32 {
        self.0.len() as u32
    }
}

impl std::ops::IndexMut<u32> for Fifo {
    fn index_mut(&mut self, idx: u32) -> &mut Self::Output {
        &mut self.0[idx as usize]
    }
}

impl std::ops::Index<u32> for Fifo {
    type Output = registers::Data;

    fn index(&self, idx: u32) -> &Self::Output {
        &self.0[idx as usize]
    }
}

#[repr(C)]
#[derive(Debug, qemu_api_macros::Object, qemu_api_macros::offsets)]
/// PL011 Device Model in QEMU
pub struct PL011State {
    pub parent_obj: ParentField<SysBusDevice>,
    pub iomem: MemoryRegion,
    #[doc(alias = "fr")]
    pub flags: registers::Flags,
    #[doc(alias = "lcr")]
    pub line_control: registers::LineControl,
    #[doc(alias = "rsr")]
    pub receive_status_error_clear: registers::ReceiveStatusErrorClear,
    #[doc(alias = "cr")]
    pub control: registers::Control,
    pub dmacr: u32,
    pub int_enabled: u32,
    pub int_level: u32,
    pub read_fifo: Fifo,
    pub ilpr: u32,
    pub ibrd: u32,
    pub fbrd: u32,
    pub ifl: u32,
    pub read_pos: u32,
    pub read_count: u32,
    pub read_trigger: u32,
    #[doc(alias = "chr")]
    pub char_backend: CharBackend,
    /// QEMU interrupts
    ///
    /// ```text
    /// * sysbus MMIO region 0: device registers
    /// * sysbus IRQ 0: `UARTINTR` (combined interrupt line)
    /// * sysbus IRQ 1: `UARTRXINTR` (receive FIFO interrupt line)
    /// * sysbus IRQ 2: `UARTTXINTR` (transmit FIFO interrupt line)
    /// * sysbus IRQ 3: `UARTRTINTR` (receive timeout interrupt line)
    /// * sysbus IRQ 4: `UARTMSINTR` (modem status interrupt line)
    /// * sysbus IRQ 5: `UARTEINTR` (error interrupt line)
    /// ```
    #[doc(alias = "irq")]
    pub interrupts: [InterruptSource; IRQMASK.len()],
    #[doc(alias = "clk")]
    pub clock: NonNull<Clock>,
    #[doc(alias = "migrate_clk")]
    pub migrate_clock: bool,
}

qom_isa!(PL011State : SysBusDevice, DeviceState, Object);

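// The device ID lives in the class rather than the instance: the ARM and
// Luminary variants share all of `PL011State` and differ only in the ID bytes
// that their respective `class_init` implementations install here.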
#[repr(C)]
pub struct PL011Class {
    parent_class: <SysBusDevice as ObjectType>::Class,
    /// The byte string that identifies the device.
    device_id: DeviceId,
}

unsafe impl ObjectType for PL011State {
    type Class = PL011Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011;
}

impl ClassInitImpl<PL011Class> for PL011State {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::ARM;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

impl ObjectImpl for PL011State {
    type ParentType = SysBusDevice;

    const INSTANCE_INIT: Option<unsafe fn(&mut Self)> = Some(Self::init);
    const INSTANCE_POST_INIT: Option<fn(&Self)> = Some(Self::post_init);
}

impl DeviceImpl for PL011State {
    fn properties() -> &'static [Property] {
        &device_class::PL011_PROPERTIES
    }
    fn vmsd() -> Option<&'static VMStateDescription> {
        Some(&device_class::VMSTATE_PL011)
    }
    const REALIZE: Option<fn(&Self)> = Some(Self::realize);
    const RESET: Option<fn(&mut Self)> = Some(Self::reset);
}

impl PL011State {
    /// Initializes a pre-allocated, uninitialized instance of `PL011State`.
    ///
    /// # Safety
    ///
    /// `self` must point to a correctly sized and aligned location for the
    /// `PL011State` type. It must not be called more than once on the same
    /// location/instance. All its fields are expected to hold uninitialized
    /// values with the sole exception of `parent_obj`.
    unsafe fn init(&mut self) {
        const CLK_NAME: &CStr = c_str!("clk");

        // SAFETY:
        //
        // self and self.iomem are guaranteed to be valid at this point since callers
        // must make sure the `self` reference is valid.
        unsafe {
            memory_region_init_io(
                addr_of_mut!(self.iomem),
                addr_of_mut!(*self).cast::<Object>(),
                &PL011_OPS,
                addr_of_mut!(*self).cast::<c_void>(),
                Self::TYPE_NAME.as_ptr(),
                0x1000,
            );
        }

        // SAFETY:
        //
        // self.clock is not initialized at this point; but since `NonNull<_>` is Copy,
        // we can overwrite the undefined value without side effects. This is
        // safe since all PL011State instances are created by QOM code which
        // calls this function to initialize the fields; therefore no code is
        // able to access an invalid self.clock value.
        unsafe {
            let dev: &mut DeviceState = self.upcast_mut();
            self.clock = NonNull::new(qdev_init_clock_in(
                dev,
                CLK_NAME.as_ptr(),
                None, /* pl011_clock_update */
                addr_of_mut!(*self).cast::<c_void>(),
                ClockEvent::ClockUpdate.0,
            ))
            .unwrap();
        }
    }

    fn post_init(&self) {
        self.init_mmio(&self.iomem);
        for irq in self.interrupts.iter() {
            self.init_irq(irq);
        }
    }

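    // A plain register read is reported as `ControlFlow::Break(value)`. The
    // `DR` arm instead returns `ControlFlow::Continue(value)`, telling the
    // caller that a byte was drained from the receive FIFO and that
    // `qemu_chr_fe_accept_input()` must therefore be called as well.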
    pub fn read(&mut self, offset: hwaddr, _size: c_uint) -> std::ops::ControlFlow<u64, u64> {
        use RegisterOffset::*;

        let value = match RegisterOffset::try_from(offset) {
            Err(v) if (0x3f8..0x400).contains(&(v >> 2)) => {
                let device_id = self.get_class().device_id;
                u32::from(device_id[(offset - 0xfe0) >> 2])
            }
            Err(_) => {
                // qemu_log_mask(LOG_GUEST_ERROR, "pl011_read: Bad offset 0x%x\n", (int)offset);
                0
            }
            Ok(DR) => {
                self.flags.set_receive_fifo_full(false);
                let c = self.read_fifo[self.read_pos];
                if self.read_count > 0 {
                    self.read_count -= 1;
                    self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1);
                }
                if self.read_count == 0 {
                    self.flags.set_receive_fifo_empty(true);
                }
                if self.read_count + 1 == self.read_trigger {
                    self.int_level &= !registers::INT_RX;
                }
                // Update error bits.
                self.receive_status_error_clear.set_from_data(c);
                self.update();
                // Must call qemu_chr_fe_accept_input, so return Continue:
                let c = u32::from(c);
                return std::ops::ControlFlow::Continue(u64::from(c));
            }
            Ok(RSR) => u32::from(self.receive_status_error_clear),
            Ok(FR) => u32::from(self.flags),
            Ok(FBRD) => self.fbrd,
            Ok(ILPR) => self.ilpr,
            Ok(IBRD) => self.ibrd,
            Ok(LCR_H) => u32::from(self.line_control),
            Ok(CR) => u32::from(self.control),
            Ok(FLS) => self.ifl,
            Ok(IMSC) => self.int_enabled,
            Ok(RIS) => self.int_level,
            Ok(MIS) => self.int_level & self.int_enabled,
            Ok(ICR) => {
                // "The UARTICR Register is the interrupt clear register and is write-only"
                // Source: ARM DDI 0183G 3.3.13 Interrupt Clear Register, UARTICR
                0
            }
            Ok(DMACR) => self.dmacr,
        };
        std::ops::ControlFlow::Break(value.into())
    }

    pub fn write(&mut self, offset: hwaddr, value: u64) {
        // eprintln!("write offset {offset} value {value}");
        use RegisterOffset::*;
        let value: u32 = value as u32;
        match RegisterOffset::try_from(offset) {
            Err(_bad_offset) => {
                eprintln!("write bad offset {offset} value {value}");
            }
            Ok(DR) => {
                // ??? Check if transmitter is enabled.
                let ch: u8 = value as u8;
                // XXX this blocks entire thread. Rewrite to use
                // qemu_chr_fe_write and background I/O callbacks

                // SAFETY: self.char_backend is a valid CharBackend instance after it's been
                // initialized in realize().
                unsafe {
                    qemu_chr_fe_write_all(addr_of_mut!(self.char_backend), &ch, 1);
                }
                self.loopback_tx(value);
                self.int_level |= registers::INT_TX;
                self.update();
            }
            Ok(RSR) => {
                self.receive_status_error_clear.reset();
            }
            Ok(FR) => {
                // flag writes are ignored
            }
            Ok(ILPR) => {
                self.ilpr = value;
            }
            Ok(IBRD) => {
                self.ibrd = value;
            }
            Ok(FBRD) => {
                self.fbrd = value;
            }
            Ok(LCR_H) => {
                let new_val: registers::LineControl = value.into();
                // Reset the FIFO state on FIFO enable or disable
                if self.line_control.fifos_enabled() != new_val.fifos_enabled() {
                    self.reset_rx_fifo();
                    self.reset_tx_fifo();
                }
                if self.line_control.send_break() ^ new_val.send_break() {
                    let mut break_enable: c_int = new_val.send_break().into();
                    // SAFETY: self.char_backend is a valid CharBackend instance after it's been
                    // initialized in realize().
                    unsafe {
                        qemu_chr_fe_ioctl(
                            addr_of_mut!(self.char_backend),
                            CHR_IOCTL_SERIAL_SET_BREAK as i32,
                            addr_of_mut!(break_enable).cast::<c_void>(),
                        );
                    }
                    self.loopback_break(break_enable > 0);
                }
                self.line_control = new_val;
                self.set_read_trigger();
            }
            Ok(CR) => {
                // ??? Need to implement the enable bit.
                self.control = value.into();
                self.loopback_mdmctrl();
            }
            Ok(FLS) => {
                self.ifl = value;
                self.set_read_trigger();
            }
            Ok(IMSC) => {
                self.int_enabled = value;
                self.update();
            }
            Ok(RIS) => {}
            Ok(MIS) => {}
            Ok(ICR) => {
                self.int_level &= !value;
                self.update();
            }
            Ok(DMACR) => {
                self.dmacr = value;
                if value & 3 > 0 {
                    // qemu_log_mask(LOG_UNIMP, "pl011: DMA not implemented\n");
                    eprintln!("pl011: DMA not implemented");
                }
            }
        }
    }

    #[inline]
    fn loopback_tx(&mut self, value: u32) {
        if !self.loopback_enabled() {
            return;
        }

        // Caveat:
        //
        // In real hardware, TX loopback happens at the serial-bit level
        // and is then reassembled by the RX logic back into bytes and placed
        // into the RX fifo. That is, loopback happens after the TX fifo.
        //
        // Because the real hardware TX fifo is time-drained at the frame
        // rate governed by the configured serial format, some loopback
        // bytes in the TX fifo may still be able to get into the RX fifo
        // that could be full at times while being drained at software
        // pace.
        //
        // In such a scenario, the RX draining pace is the major factor
        // deciding which loopback bytes get into the RX fifo, unless
        // hardware flow-control is enabled.
        //
        // For simplicity, the behaviour described above is not emulated.
        self.put_fifo(value);
    }

    fn loopback_mdmctrl(&mut self) {
        if !self.loopback_enabled() {
            return;
        }

        /*
         * Loopback software-driven modem control outputs to modem status inputs:
         *   FR.RI  <= CR.Out2
         *   FR.DCD <= CR.Out1
         *   FR.CTS <= CR.RTS
         *   FR.DSR <= CR.DTR
         *
         * The loopback happens immediately even if this call is triggered
         * by setting only CR.LBE.
         *
         * CTS/RTS updates due to enabled hardware flow controls are not
         * dealt with here.
         */

        self.flags.set_ring_indicator(self.control.out_2());
        self.flags.set_data_carrier_detect(self.control.out_1());
        self.flags.set_clear_to_send(self.control.request_to_send());
        self.flags
            .set_data_set_ready(self.control.data_transmit_ready());

        // Change interrupts based on updated FR
        let mut il = self.int_level;

        il &= !Interrupt::MS;

        if self.flags.data_set_ready() {
            il |= Interrupt::DSR as u32;
        }
        if self.flags.data_carrier_detect() {
            il |= Interrupt::DCD as u32;
        }
        if self.flags.clear_to_send() {
            il |= Interrupt::CTS as u32;
        }
        if self.flags.ring_indicator() {
            il |= Interrupt::RI as u32;
        }
        self.int_level = il;
        self.update();
    }

    fn loopback_break(&mut self, enable: bool) {
        if enable {
            self.loopback_tx(registers::Data::BREAK.into());
        }
    }

    fn set_read_trigger(&mut self) {
        self.read_trigger = 1;
    }

    pub fn realize(&self) {
        // SAFETY: self.char_backend has the correct size and alignment for a
        // CharBackend object, and its callbacks are of the correct types.
        unsafe {
            qemu_chr_fe_set_handlers(
                addr_of!(self.char_backend) as *mut CharBackend,
                Some(pl011_can_receive),
                Some(pl011_receive),
                Some(pl011_event),
                None,
                addr_of!(*self).cast::<c_void>() as *mut c_void,
                core::ptr::null_mut(),
                true,
            );
        }
    }

    pub fn reset(&mut self) {
        self.line_control.reset();
        self.receive_status_error_clear.reset();
        self.dmacr = 0;
        self.int_enabled = 0;
        self.int_level = 0;
        self.ilpr = 0;
        self.ibrd = 0;
        self.fbrd = 0;
        self.read_trigger = 1;
        self.ifl = 0x12;
        self.control.reset();
        self.flags.reset();
        self.reset_rx_fifo();
        self.reset_tx_fifo();
    }

    pub fn reset_rx_fifo(&mut self) {
        self.read_count = 0;
        self.read_pos = 0;

        // Reset FIFO flags
        self.flags.set_receive_fifo_full(false);
        self.flags.set_receive_fifo_empty(true);
    }

    pub fn reset_tx_fifo(&mut self) {
        // Reset FIFO flags
        self.flags.set_transmit_fifo_full(false);
        self.flags.set_transmit_fifo_empty(true);
    }

    pub fn can_receive(&self) -> bool {
        // trace_pl011_can_receive(s->lcr, s->read_count, r);
        self.read_count < self.fifo_depth()
    }

    pub fn event(&mut self, event: QEMUChrEvent) {
        if event == QEMUChrEvent::CHR_EVENT_BREAK && !self.loopback_enabled() {
            self.put_fifo(registers::Data::BREAK.into());
        }
    }

    #[inline]
    pub fn fifo_enabled(&self) -> bool {
        self.line_control.fifos_enabled() == registers::Mode::FIFO
    }

    #[inline]
    pub fn loopback_enabled(&self) -> bool {
        self.control.enable_loopback()
    }

    #[inline]
    pub fn fifo_depth(&self) -> u32 {
        // Note: FIFO depth is expected to be power-of-2
        if self.fifo_enabled() {
            return PL011_FIFO_DEPTH;
        }
        1
    }

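    // The receive FIFO is a ring buffer over `read_fifo`: `read_pos` indexes
    // the oldest byte, `read_count` is the number of queued bytes, and a new
    // byte lands in slot `(read_pos + read_count) & (depth - 1)`. For example,
    // with `depth == 16`, `read_pos == 14` and `read_count == 3`, the next
    // byte is stored at index 1, wrapping around the end of the array.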
    pub fn put_fifo(&mut self, value: c_uint) {
        let depth = self.fifo_depth();
        assert!(depth > 0);
        let slot = (self.read_pos + self.read_count) & (depth - 1);
        self.read_fifo[slot] = registers::Data::from(value);
        self.read_count += 1;
        self.flags.set_receive_fifo_empty(false);
        if self.read_count == depth {
            self.flags.set_receive_fifo_full(true);
        }

        if self.read_count == self.read_trigger {
            self.int_level |= registers::INT_RX;
            self.update();
        }
    }

    pub fn update(&self) {
        let flags = self.int_level & self.int_enabled;
        for (irq, i) in self.interrupts.iter().zip(IRQMASK) {
            irq.set(flags & i != 0);
        }
    }

    pub fn post_load(&mut self, _version_id: u32) -> Result<(), ()> {
        /* Sanity-check input state */
        if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() {
            return Err(());
        }

        if !self.fifo_enabled() && self.read_count > 0 && self.read_pos > 0 {
            // Older versions of PL011 didn't ensure that the single
            // character in the FIFO in FIFO-disabled mode is in
            // element 0 of the array; convert to follow the current
            // code's assumptions.
            self.read_fifo[0] = self.read_fifo[self.read_pos];
            self.read_pos = 0;
        }

        self.ibrd &= IBRD_MASK;
        self.fbrd &= FBRD_MASK;

        Ok(())
    }
}

/// Which bits in the interrupt status matter for each outbound IRQ line?
pub const IRQMASK: [u32; 6] = [
    /* combined IRQ */
    Interrupt::E
        | Interrupt::MS
        | Interrupt::RT as u32
        | Interrupt::TX as u32
        | Interrupt::RX as u32,
    Interrupt::RX as u32,
    Interrupt::TX as u32,
    Interrupt::RT as u32,
    Interrupt::MS,
    Interrupt::E,
];

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from only one thread at any time.
pub unsafe extern "C" fn pl011_can_receive(opaque: *mut c_void) -> c_int {
    unsafe {
        debug_assert!(!opaque.is_null());
        let state = NonNull::new_unchecked(opaque.cast::<PL011State>());
        state.as_ref().can_receive().into()
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from only one thread at any time.
///
/// The buffer and size arguments must also be valid.
pub unsafe extern "C" fn pl011_receive(opaque: *mut c_void, buf: *const u8, size: c_int) {
    unsafe {
        debug_assert!(!opaque.is_null());
        let mut state = NonNull::new_unchecked(opaque.cast::<PL011State>());
        if state.as_ref().loopback_enabled() {
            return;
        }
        if size > 0 {
            debug_assert!(!buf.is_null());
            state.as_mut().put_fifo(c_uint::from(buf.read_volatile()))
        }
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer that has
/// the same size as [`PL011State`]. We also expect the device to be
/// readable/writeable from only one thread at any time.
pub unsafe extern "C" fn pl011_event(opaque: *mut c_void, event: QEMUChrEvent) {
    unsafe {
        debug_assert!(!opaque.is_null());
        let mut state = NonNull::new_unchecked(opaque.cast::<PL011State>());
        state.as_mut().event(event)
    }
}

/// # Safety
///
/// We expect the FFI user of this function to pass a valid pointer for `chr`.
#[no_mangle]
pub unsafe extern "C" fn pl011_create(
    addr: u64,
    irq: qemu_irq,
    chr: *mut Chardev,
) -> *mut DeviceState {
    unsafe {
        let dev: *mut DeviceState = qdev_new(PL011State::TYPE_NAME.as_ptr());
        let sysbus: *mut SysBusDevice = dev.cast::<SysBusDevice>();

        qdev_prop_set_chr(dev, c_str!("chardev").as_ptr(), chr);
        sysbus_realize_and_unref(sysbus, addr_of_mut!(error_fatal));
        sysbus_mmio_map(sysbus, 0, addr);
        sysbus_connect_irq(sysbus, 0, irq);
        dev
    }
}

#[repr(C)]
#[derive(Debug, qemu_api_macros::Object)]
/// PL011 Luminary device model.
pub struct PL011Luminary {
    parent_obj: ParentField<PL011State>,
}

impl ClassInitImpl<PL011Class> for PL011Luminary {
    fn class_init(klass: &mut PL011Class) {
        klass.device_id = DeviceId::LUMINARY;
        <Self as ClassInitImpl<SysBusDeviceClass>>::class_init(&mut klass.parent_class);
    }
}

qom_isa!(PL011Luminary : PL011State, SysBusDevice, DeviceState, Object);

unsafe impl ObjectType for PL011Luminary {
    type Class = <PL011State as ObjectType>::Class;
    const TYPE_NAME: &'static CStr = crate::TYPE_PL011_LUMINARY;
}

impl ObjectImpl for PL011Luminary {
    type ParentType = PL011State;
}

impl DeviceImpl for PL011Luminary {}
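
// A minimal sanity-check sketch, not part of the original device model. It
// only exercises pure data (the ID bytes, the FIFO capacity and the IRQ
// masks), so no running QEMU instance is needed; it does assume the crate can
// be built under `cargo test` with its usual linking setup.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn device_ids_share_primecell_suffix() {
        // Both implementations end with the same four PrimeCell ID bytes.
        for i in 4..8 {
            assert_eq!(DeviceId::ARM[i as hwaddr], DeviceId::LUMINARY[i as hwaddr]);
        }
    }

    #[test]
    fn fifo_matches_advertised_depth() {
        assert_eq!(Fifo::default().len(), PL011_FIFO_DEPTH);
    }

    #[test]
    fn combined_irq_mask_covers_individual_lines() {
        // IRQMASK[0] drives the combined `UARTINTR` line; it must include
        // every per-source mask.
        for mask in &IRQMASK[1..] {
            assert_eq!(IRQMASK[0] & *mask, *mask);
        }
    }
}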