// SPDX-License-Identifier: GPL-2.0

//! Memory-mapped IO.
//!
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)

use crate::error::{code::EINVAL, Result};
use crate::{bindings, build_assert, ffi::c_void};

pub mod mem;
pub mod resource;

pub use resource::Resource;

/// Raw representation of an MMIO region.
///
/// By itself, the existence of an instance of this structure does not guarantee that the
/// represented MMIO region exists or is properly mapped.
///
/// Instead, the bus-specific MMIO implementation must convert this raw representation into an
/// `Io` instance providing the actual memory accessors. Only through the conversion into an
/// `Io` structure are any guarantees given.
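///
/// # Examples
///
/// A minimal sketch of the intended pattern (illustrative only; the address below is a
/// made-up placeholder, and real implementations such as [`pci::Bar`] map actual hardware):
///
/// ```ignore
/// let raw = IoRaw::<4>::new(0x1000, 4)?;
/// // SAFETY: For illustration only; a real caller must guarantee a valid MMIO mapping.
/// let io = unsafe { Io::from_raw(&raw) };
/// let value = io.read32(0x0);
/// ```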
pub struct IoRaw<const SIZE: usize = 0> {
    addr: usize,
    maxsize: usize,
}

impl<const SIZE: usize> IoRaw<SIZE> {
    /// Returns a new `IoRaw` instance on success, an error otherwise.
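    ///
    /// A minimal sketch of the size check (the address is an arbitrary placeholder):
    ///
    /// ```ignore
    /// // Succeeds: `maxsize` (8) covers `SIZE` (4).
    /// let raw = IoRaw::<4>::new(0x1000, 8)?;
    ///
    /// // Fails with `EINVAL`: `maxsize` (2) is smaller than `SIZE` (4).
    /// assert!(IoRaw::<4>::new(0x1000, 2).is_err());
    /// ```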
    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
        if maxsize < SIZE {
            return Err(EINVAL);
        }

        Ok(Self { addr, maxsize })
    }

    /// Returns the base address of the MMIO region.
    #[inline]
    pub fn addr(&self) -> usize {
        self.addr
    }

    /// Returns the maximum size of the MMIO region.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.maxsize
    }
}

/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request, etc.
///
/// # Invariant
///
/// `addr` is the start of a valid I/O mapped memory region of size `maxsize`.
///
/// # Examples
///
/// ```no_run
/// # use kernel::{bindings, ffi::c_void, io::{Io, IoRaw}};
/// # use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the CPU's
///     /// virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as bindings::phys_addr_t, SIZE) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void) };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///     type Target = Io<SIZE>;
///
///     fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Io::from_raw(&self.0) }
///     }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.write32(0x42, 0x0);
/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
/// assert!(iomem.try_write32(0x42, 0x4).is_err());
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);

macro_rules! define_read {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident -> $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(addr as *const c_void) }
        }

        /// Read IO data from a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
        }
    };
}
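
// As an illustrative sketch (omitting `#[inline]` and doc attributes), the invocation
// `define_read!(read32, try_read32, readl -> u32)` below expands to roughly:
//
//     pub fn read32(&self, offset: usize) -> u32 {
//         // Compile-time bounds check; an invalid offset fails the build.
//         let addr = self.io_addr_assert::<u32>(offset);
//         unsafe { bindings::readl(addr as *const c_void) }
//     }
//
//     pub fn try_read32(&self, offset: usize) -> Result<u32> {
//         // Runtime bounds check; an invalid offset yields `Err(EINVAL)`.
//         let addr = self.io_addr::<u32>(offset)?;
//         Ok(unsafe { bindings::readl(addr as *const c_void) })
//     }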

macro_rules! define_write {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident <- $type_name:ty) => {
        /// Write IO data to a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        #[inline]
        pub fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
        }

        /// Write IO data to a given offset.
        ///
        /// Bounds checks are performed at runtime; this fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
            Ok(())
        }
    };
}
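
// Likewise for writes; as a usage sketch (assuming a hypothetical `io: &Io<8>` backed by
// exactly 8 bytes of mapped MMIO):
//
//     io.write32(0x42, 0x0);                       // compile-time checked, in bounds
//     assert!(io.try_write32(0x42, 0x0).is_ok());  // runtime checked, in bounds
//     assert!(io.try_write32(0x42, 0x8).is_err()); // 0x8 + 4 > 8 bytes => EINVAL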

impl<const SIZE: usize> Io<SIZE> {
    /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size
    /// `maxsize`.
    pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
        // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

    /// Returns the base address of this mapping.
    #[inline]
    pub fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

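    /// Returns whether an access of `size_of::<U>()` bytes at `offset` stays within `size`
    /// bytes and `offset` is a multiple of the access width (e.g. `offset_valid::<u32>(6, 8)`
    /// is `false`, since `6 % 4 != 0`).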
    #[inline]
    const fn offset_valid<U>(offset: usize, size: usize) -> bool {
        let type_size = core::mem::size_of::<U>();
        if let Some(end) = offset.checked_add(type_size) {
            end <= size && offset % type_size == 0
        } else {
            false
        }
    }

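    /// Computes the absolute address for a runtime-checked access: validates `offset` (plus
    /// the width of `U`) against `self.maxsize()`, then returns `self.addr() + offset`, or
    /// `EINVAL` otherwise.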
    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !Self::offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the safety requirements of `Self::new` guarantee that
        // this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

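    /// Computes the absolute address for a compile-time checked access: `build_assert!`
    /// requires the bounds check to be provable at build time, so the build fails if `offset`
    /// is not known at compile time or is out of bounds for `SIZE`.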
    #[inline]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(Self::offset_valid::<U>(offset, SIZE));

        self.addr() + offset
    }

    define_read!(read8, try_read8, readb -> u8);
    define_read!(read16, try_read16, readw -> u16);
    define_read!(read32, try_read32, readl -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64,
        try_read64,
        readq -> u64
    );

    define_read!(read8_relaxed, try_read8_relaxed, readb_relaxed -> u8);
    define_read!(read16_relaxed, try_read16_relaxed, readw_relaxed -> u16);
    define_read!(read32_relaxed, try_read32_relaxed, readl_relaxed -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64_relaxed,
        try_read64_relaxed,
        readq_relaxed -> u64
    );

    define_write!(write8, try_write8, writeb <- u8);
    define_write!(write16, try_write16, writew <- u16);
    define_write!(write32, try_write32, writel <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64,
        try_write64,
        writeq <- u64
    );

    define_write!(write8_relaxed, try_write8_relaxed, writeb_relaxed <- u8);
    define_write!(write16_relaxed, try_write16_relaxed, writew_relaxed <- u16);
    define_write!(write32_relaxed, try_write32_relaxed, writel_relaxed <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64_relaxed,
        try_write64_relaxed,
        writeq_relaxed <- u64
    );
}