xref: /linux/rust/kernel/dma.rs (revision 0074281bb6316108e0cff094bd4db78ab3eee236)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 //! Direct memory access (DMA).
4 //!
5 //! C header: [`include/linux/dma-mapping.h`](srctree/include/linux/dma-mapping.h)
6 
7 use crate::{
8     bindings, build_assert, device,
9     device::{Bound, Core},
10     error::{to_result, Result},
11     prelude::*,
12     transmute::{AsBytes, FromBytes},
13     types::ARef,
14 };
15 
16 /// Trait to be implemented by DMA capable bus devices.
17 ///
18 /// The [`dma::Device`](Device) trait should be implemented by bus specific device representations,
19 /// where the underlying bus is DMA capable, such as [`pci::Device`](::kernel::pci::Device) or
20 /// [`platform::Device`](::kernel::platform::Device).
21 pub trait Device: AsRef<device::Device<Core>> {
22     /// Set up the device's DMA streaming addressing capabilities.
23     ///
24     /// This method is usually called once from `probe()` as soon as the device capabilities are
25     /// known.
26     ///
27     /// # Safety
28     ///
29     /// This method must not be called concurrently with any DMA allocation or mapping primitives,
30     /// such as [`CoherentAllocation::alloc_attrs`].
31     unsafe fn dma_set_mask(&self, mask: DmaMask) -> Result {
32         // SAFETY:
33         // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
34         // - The safety requirement of this function guarantees that there are no concurrent calls
35         //   to DMA allocation and mapping primitives using this mask.
36         to_result(unsafe { bindings::dma_set_mask(self.as_ref().as_raw(), mask.value()) })
37     }
38 
39     /// Set up the device's DMA coherent addressing capabilities.
40     ///
41     /// This method is usually called once from `probe()` as soon as the device capabilities are
42     /// known.
43     ///
44     /// # Safety
45     ///
46     /// This method must not be called concurrently with any DMA allocation or mapping primitives,
47     /// such as [`CoherentAllocation::alloc_attrs`].
48     unsafe fn dma_set_coherent_mask(&self, mask: DmaMask) -> Result {
49         // SAFETY:
50         // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
51         // - The safety requirement of this function guarantees that there are no concurrent calls
52         //   to DMA allocation and mapping primitives using this mask.
53         to_result(unsafe { bindings::dma_set_coherent_mask(self.as_ref().as_raw(), mask.value()) })
54     }
55 
56     /// Set up the device's DMA addressing capabilities.
57     ///
58     /// This is a combination of [`Device::dma_set_mask`] and [`Device::dma_set_coherent_mask`].
59     ///
60     /// This method is usually called once from `probe()` as soon as the device capabilities are
61     /// known.
62     ///
63     /// # Safety
64     ///
65     /// This method must not be called concurrently with any DMA allocation or mapping primitives,
66     /// such as [`CoherentAllocation::alloc_attrs`].
67     unsafe fn dma_set_mask_and_coherent(&self, mask: DmaMask) -> Result {
68         // SAFETY:
69         // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
70         // - The safety requirement of this function guarantees that there are no concurrent calls
71         //   to DMA allocation and mapping primitives using this mask.
72         to_result(unsafe {
73             bindings::dma_set_mask_and_coherent(self.as_ref().as_raw(), mask.value())
74         })
75     }
76 }
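// A minimal usage sketch: a bus driver typically sets up its DMA masks once in `probe()`,
// before creating any DMA allocations or mappings, which is what makes the `unsafe` calls
// above sound. The surrounding driver code (`pdev` as a bound bus device such as
// `pci::Device`) is assumed, not shown:
//
//     // SAFETY: Called from `probe()`; no DMA allocation or mapping primitives (such as
//     // `CoherentAllocation::alloc_attrs`) are used concurrently.
//     unsafe { pdev.dma_set_mask_and_coherent(DmaMask::new::<64>())? };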
77 
78 /// A DMA mask that holds a bitmask with the lowest `n` bits set.
79 ///
80 /// Use [`DmaMask::new`] or [`DmaMask::try_new`] to construct a value. Values
81 /// are guaranteed to never exceed the bit width of `u64`.
82 ///
83 /// This is the Rust equivalent of the C macro `DMA_BIT_MASK()`.
84 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
85 pub struct DmaMask(u64);
86 
87 impl DmaMask {
88     /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
89     ///
90     /// For `n <= 64`, sets exactly the lowest `n` bits.
91     /// For `n > 64`, results in a build error.
92     ///
93     /// # Examples
94     ///
95     /// ```
96     /// use kernel::dma::DmaMask;
97     ///
98     /// let mask0 = DmaMask::new::<0>();
99     /// assert_eq!(mask0.value(), 0);
100     ///
101     /// let mask1 = DmaMask::new::<1>();
102     /// assert_eq!(mask1.value(), 0b1);
103     ///
104     /// let mask64 = DmaMask::new::<64>();
105     /// assert_eq!(mask64.value(), u64::MAX);
106     ///
107     /// // Build failure.
108     /// // let mask_overflow = DmaMask::new::<100>();
109     /// ```
110     #[inline]
111     pub const fn new<const N: u32>() -> Self {
112         let Ok(mask) = Self::try_new(N) else {
113             build_error!("Invalid DMA Mask.");
114         };
115 
116         mask
117     }
118 
119     /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
120     ///
121     /// For `n <= 64`, sets exactly the lowest `n` bits.
122     /// For `n > 64`, returns [`EINVAL`].
123     ///
124     /// # Examples
125     ///
126     /// ```
127     /// use kernel::dma::DmaMask;
128     ///
129     /// let mask0 = DmaMask::try_new(0)?;
130     /// assert_eq!(mask0.value(), 0);
131     ///
132     /// let mask1 = DmaMask::try_new(1)?;
133     /// assert_eq!(mask1.value(), 0b1);
134     ///
135     /// let mask64 = DmaMask::try_new(64)?;
136     /// assert_eq!(mask64.value(), u64::MAX);
137     ///
138     /// let mask_overflow = DmaMask::try_new(100);
139     /// assert!(mask_overflow.is_err());
140     /// # Ok::<(), Error>(())
141     /// ```
142     #[inline]
143     pub const fn try_new(n: u32) -> Result<Self> {
144         Ok(Self(match n {
145             0 => 0,
146             1..=64 => u64::MAX >> (64 - n),
147             _ => return Err(EINVAL),
148         }))
149     }
150 
151     /// Returns the underlying `u64` bitmask value.
152     #[inline]
153     pub const fn value(&self) -> u64 {
154         self.0
155     }
156 }
157 
158 /// Possible attributes associated with a DMA mapping.
159 ///
160 /// They can be combined with the operators `|`, `&`, and `!`.
161 ///
162 /// Values can be used from the [`attrs`] module.
163 ///
164 /// # Examples
165 ///
166 /// ```
167 /// # use kernel::device::{Bound, Device};
168 /// use kernel::dma::{attrs::*, CoherentAllocation};
169 ///
170 /// # fn test(dev: &Device<Bound>) -> Result {
171 /// let attribs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
172 /// let c: CoherentAllocation<u64> =
173 ///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, attribs)?;
174 /// # Ok::<(), Error>(()) }
175 /// ```
176 #[derive(Clone, Copy, PartialEq)]
177 #[repr(transparent)]
178 pub struct Attrs(u32);
179 
180 impl Attrs {
181     /// Get the raw representation of this attribute.
182     pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
183         self.0 as crate::ffi::c_ulong
184     }
185 
186     /// Check whether `flags` is contained in `self`.
187     pub fn contains(self, flags: Attrs) -> bool {
188         (self & flags) == flags
189     }
190 }
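// A short sketch of how `Attrs` values combine with the bitwise operators implemented below,
// using constants from the [`attrs`] module defined later in this file:
//
//     use kernel::dma::attrs::*;
//
//     let flags = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
//     assert!(flags.contains(DMA_ATTR_NO_WARN));
//     assert!(!flags.contains(DMA_ATTR_WEAK_ORDERING));
//
//     // `&` and `!` can be used to clear an attribute again.
//     let flags = flags & !DMA_ATTR_NO_WARN;
//     assert!(!flags.contains(DMA_ATTR_NO_WARN));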
191 
192 impl core::ops::BitOr for Attrs {
193     type Output = Self;
194     fn bitor(self, rhs: Self) -> Self::Output {
195         Self(self.0 | rhs.0)
196     }
197 }
198 
199 impl core::ops::BitAnd for Attrs {
200     type Output = Self;
201     fn bitand(self, rhs: Self) -> Self::Output {
202         Self(self.0 & rhs.0)
203     }
204 }
205 
206 impl core::ops::Not for Attrs {
207     type Output = Self;
208     fn not(self) -> Self::Output {
209         Self(!self.0)
210     }
211 }
212 
213 /// DMA mapping attributes.
214 pub mod attrs {
215     use super::Attrs;
216 
217     /// Specifies that reads and writes to the mapping may be weakly ordered, that is, reads and
218     /// writes may pass each other.
219     pub const DMA_ATTR_WEAK_ORDERING: Attrs = Attrs(bindings::DMA_ATTR_WEAK_ORDERING);
220 
221     /// Specifies that writes to the mapping may be buffered to improve performance.
222     pub const DMA_ATTR_WRITE_COMBINE: Attrs = Attrs(bindings::DMA_ATTR_WRITE_COMBINE);
223 
224     /// Lets the platform avoid creating a kernel virtual mapping for the allocated buffer.
225     pub const DMA_ATTR_NO_KERNEL_MAPPING: Attrs = Attrs(bindings::DMA_ATTR_NO_KERNEL_MAPPING);
226 
227     /// Allows platform code to skip synchronization of the CPU cache for the given buffer, assuming
228     /// that it has already been transferred to the 'device' domain.
229     pub const DMA_ATTR_SKIP_CPU_SYNC: Attrs = Attrs(bindings::DMA_ATTR_SKIP_CPU_SYNC);
230 
231     /// Forces contiguous allocation of the buffer in physical memory.
232     pub const DMA_ATTR_FORCE_CONTIGUOUS: Attrs = Attrs(bindings::DMA_ATTR_FORCE_CONTIGUOUS);
233 
234     /// Hints to the DMA-mapping subsystem that it's probably not worth the time to try
235     /// to allocate memory in a way that gives better TLB efficiency.
236     pub const DMA_ATTR_ALLOC_SINGLE_PAGES: Attrs = Attrs(bindings::DMA_ATTR_ALLOC_SINGLE_PAGES);
237 
238     /// This tells the DMA-mapping subsystem to suppress allocation failure reports (similarly to
239     /// `__GFP_NOWARN`).
240     pub const DMA_ATTR_NO_WARN: Attrs = Attrs(bindings::DMA_ATTR_NO_WARN);
241 
242     /// Indicates that the buffer is fully accessible at an elevated privilege level (and
243     /// ideally inaccessible or at least read-only at lesser-privileged levels).
244     pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);
245 }
246 
247 /// An abstraction of the `dma_alloc_coherent` API.
248 ///
249 /// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map
250 /// large coherent DMA regions.
251 ///
252 /// A [`CoherentAllocation`] instance contains a pointer to the allocated region (in the
253 /// processor's virtual address space) and the device address which can be given to the device
254 /// as the DMA address base of the region. The region is released once [`CoherentAllocation`]
255 /// is dropped.
256 ///
257 /// # Invariants
258 ///
259 /// - For the lifetime of an instance of [`CoherentAllocation`], the `cpu_addr` is a valid pointer
260 ///   to an allocated region of coherent memory and `dma_handle` is the DMA address base of the
261 ///   region.
262 /// - The size in bytes of the allocation is equal to `size_of::<T> * count`.
263 /// - `size_of::<T> * count` fits into a `usize`.
264 // TODO
265 //
266 // DMA allocations potentially carry device resources (e.g. IOMMU mappings), hence for soundness
267 // reasons DMA allocation would need to be embedded in a `Devres` container, in order to ensure
268 // that device resources can never survive device unbind.
269 //
270 // However, it is neither desirable nor necessary to protect the allocated memory of the DMA
271 // allocation from surviving device unbind; it would require RCU read side critical sections to
272 // access the memory, which may require subsequent unnecessary copies.
273 //
274 // Hence, find a way to revoke the device resources of a `CoherentAllocation`, but not the
275 // entire `CoherentAllocation` including the allocated memory itself.
276 pub struct CoherentAllocation<T: AsBytes + FromBytes> {
277     dev: ARef<device::Device>,
278     dma_handle: bindings::dma_addr_t,
279     count: usize,
280     cpu_addr: *mut T,
281     dma_attrs: Attrs,
282 }
283 
284 impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
285     /// Allocates a region of `size_of::<T> * count` bytes of coherent memory.
286     ///
287     /// # Examples
288     ///
289     /// ```
290     /// # use kernel::device::{Bound, Device};
291     /// use kernel::dma::{attrs::*, CoherentAllocation};
292     ///
293     /// # fn test(dev: &Device<Bound>) -> Result {
294     /// let c: CoherentAllocation<u64> =
295     ///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, DMA_ATTR_NO_WARN)?;
296     /// # Ok::<(), Error>(()) }
297     /// ```
298     pub fn alloc_attrs(
299         dev: &device::Device<Bound>,
300         count: usize,
301         gfp_flags: kernel::alloc::Flags,
302         dma_attrs: Attrs,
303     ) -> Result<CoherentAllocation<T>> {
304         build_assert!(
305             core::mem::size_of::<T>() > 0,
306             "It doesn't make sense for the allocated type to be a ZST"
307         );
308 
309         let size = count
310             .checked_mul(core::mem::size_of::<T>())
311             .ok_or(EOVERFLOW)?;
312         let mut dma_handle = 0;
313         // SAFETY: Device pointer is guaranteed as valid by the type invariant on `Device`.
314         let ret = unsafe {
315             bindings::dma_alloc_attrs(
316                 dev.as_raw(),
317                 size,
318                 &mut dma_handle,
319                 gfp_flags.as_raw(),
320                 dma_attrs.as_raw(),
321             )
322         };
323         if ret.is_null() {
324             return Err(ENOMEM);
325         }
326         // INVARIANT:
327         // - We just successfully allocated a coherent region which is accessible for
328         //   `count` elements, hence the cpu address is valid. We also hold a refcounted reference
329         //   to the device.
330         // - The allocated `size` is equal to `size_of::<T> * count`.
331         // - The allocated `size` fits into a `usize`.
332         Ok(Self {
333             dev: dev.into(),
334             dma_handle,
335             count,
336             cpu_addr: ret.cast::<T>(),
337             dma_attrs,
338         })
339     }
340 
341     /// Performs the same functionality as [`CoherentAllocation::alloc_attrs`], except that
342     /// `dma_attrs` defaults to 0 (no attributes).
343     pub fn alloc_coherent(
344         dev: &device::Device<Bound>,
345         count: usize,
346         gfp_flags: kernel::alloc::Flags,
347     ) -> Result<CoherentAllocation<T>> {
348         CoherentAllocation::alloc_attrs(dev, count, gfp_flags, Attrs(0))
349     }
350 
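    // A minimal sketch of the attribute-less variant; apart from passing no attributes it
    // behaves exactly like the `alloc_attrs()` example above, with `dev` assumed to be a
    // `&Device<Bound>` provided by the caller:
    //
    //     let c: CoherentAllocation<u32> =
    //         CoherentAllocation::alloc_coherent(dev, 16, GFP_KERNEL)?;
    //     // `c.dma_handle()` can now be programmed into the device as the region's base address.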
351     /// Returns the number of elements `T` in this allocation.
352     ///
353     /// Note that this is not the size of the allocation in bytes, which is provided by
354     /// [`Self::size`].
355     pub fn count(&self) -> usize {
356         self.count
357     }
358 
359     /// Returns the size in bytes of this allocation.
360     pub fn size(&self) -> usize {
361         // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits into
362         // a `usize`.
363         self.count * core::mem::size_of::<T>()
364     }
365 
366     /// Returns the base address to the allocated region in the CPU's virtual address space.
367     pub fn start_ptr(&self) -> *const T {
368         self.cpu_addr
369     }
370 
371     /// Returns the base address to the allocated region in the CPU's virtual address space as
372     /// a mutable pointer.
373     pub fn start_ptr_mut(&mut self) -> *mut T {
374         self.cpu_addr
375     }
376 
377     /// Returns a DMA handle which may be given to the device as the DMA address base of
378     /// the region.
379     pub fn dma_handle(&self) -> bindings::dma_addr_t {
380         self.dma_handle
381     }
382 
383     /// Returns a DMA handle starting at `offset` (in units of `T`) which may be given to the
384     /// device as the DMA address base of the region.
385     ///
386     /// Returns `EINVAL` if `offset` is not within the bounds of the allocation.
387     pub fn dma_handle_with_offset(&self, offset: usize) -> Result<bindings::dma_addr_t> {
388         if offset >= self.count {
389             Err(EINVAL)
390         } else {
391             // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits
392             // into a `usize`, and `offset` is less than `count`.
393             Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as bindings::dma_addr_t)
394         }
395     }
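    // A small sketch of the offset arithmetic: for a `CoherentAllocation<u64>` the handle for
    // element 2 is the base handle plus `2 * size_of::<u64>()` bytes, and out-of-bounds offsets
    // are rejected. `alloc` is assumed to hold more than two elements:
    //
    //     let addr = alloc.dma_handle_with_offset(2)?;
    //     assert_eq!(addr, alloc.dma_handle() + 16);
    //     assert!(alloc.dma_handle_with_offset(alloc.count()).is_err());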
396 
397     /// Common helper to validate a range within the allocated region in the CPU's virtual
398     /// address space.
399     fn validate_range(&self, offset: usize, count: usize) -> Result {
400         if offset.checked_add(count).ok_or(EOVERFLOW)? > self.count {
401             return Err(EINVAL);
402         }
403         Ok(())
404     }
405 
406     /// Returns the data from the region starting from `offset` as a slice.
407     /// `offset` and `count` are in units of `T`, not the number of bytes.
408     ///
409     /// For ring-buffer style read/write access, or for use cases where a pointer to the live data
410     /// is needed, [`CoherentAllocation::start_ptr`] or [`CoherentAllocation::start_ptr_mut`] can be
411     /// used instead.
412     ///
413     /// # Safety
414     ///
415     /// * Callers must ensure that the device does not read/write to/from memory while the returned
416     ///   slice is live.
417     /// * Callers must ensure that this call does not race with a write to the same region while
418     ///   the returned slice is live.
419     pub unsafe fn as_slice(&self, offset: usize, count: usize) -> Result<&[T]> {
420         self.validate_range(offset, count)?;
421         // SAFETY:
422         // - The pointer is valid due to type invariant on `CoherentAllocation`,
423         //   we've just checked that the range and index are within bounds. The immutability of the
424         //   data is also guaranteed by the safety requirements of the function.
425         // - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
426         //   that `self.count` won't overflow early in the constructor.
427         Ok(unsafe { core::slice::from_raw_parts(self.cpu_addr.add(offset), count) })
428     }
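    // A usage sketch for reading back data the device has written, under the safety contract
    // documented above; `alloc` is assumed to be a `CoherentAllocation<u8>` with at least four
    // elements, and the device is assumed to be quiescent:
    //
    //     // SAFETY: The device does not access the region and nothing writes to it while the
    //     // returned slice is live.
    //     let bytes = unsafe { alloc.as_slice(0, 4)? };
    //     pr_info!("first bytes: {:?}\n", bytes);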
429 
430     /// Performs the same functionality as [`CoherentAllocation::as_slice`], except that a mutable
431     /// slice is returned.
432     ///
433     /// # Safety
434     ///
435     /// * Callers must ensure that the device does not read/write to/from memory while the returned
436     ///   slice is live.
437     /// * Callers must ensure that this call does not race with a read or write to the same region
438     ///   while the returned slice is live.
439     pub unsafe fn as_slice_mut(&mut self, offset: usize, count: usize) -> Result<&mut [T]> {
440         self.validate_range(offset, count)?;
441         // SAFETY:
442         // - The pointer is valid due to type invariant on `CoherentAllocation`,
443         //   we've just checked that the range and index are within bounds. Exclusive access to the
444         //   data is also guaranteed by the safety requirements of the function.
445         // - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
446         //   that `self.count` won't overflow early in the constructor.
447         Ok(unsafe { core::slice::from_raw_parts_mut(self.cpu_addr.add(offset), count) })
448     }
449 
450     /// Writes data to the region starting from `offset`. `offset` is in units of `T`, not the
451     /// number of bytes.
452     ///
453     /// # Safety
454     ///
455     /// * Callers must ensure that the device does not read/write to/from memory while this write
456     ///   is being performed.
457     /// * Callers must ensure that this call does not race with a read or write to the same region
458     ///   that overlaps with this write.
459     ///
460     /// # Examples
461     ///
462     /// ```
463     /// # fn test(alloc: &mut kernel::dma::CoherentAllocation<u8>) -> Result {
464     /// let somedata: [u8; 4] = [0xf; 4];
465     /// let buf: &[u8] = &somedata;
466     /// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to the
467     /// // region.
468     /// unsafe { alloc.write(buf, 0)?; }
469     /// # Ok::<(), Error>(()) }
470     /// ```
471     pub unsafe fn write(&mut self, src: &[T], offset: usize) -> Result {
472         self.validate_range(offset, src.len())?;
473         // SAFETY:
474         // - The pointer is valid due to type invariant on `CoherentAllocation`
475         //   and we've just checked that the range and index are within bounds.
476         // - `offset + count` can't overflow since it is smaller than `self.count` and we've checked
477         //   that `self.count` won't overflow early in the constructor.
478         unsafe {
479             core::ptr::copy_nonoverlapping(src.as_ptr(), self.cpu_addr.add(offset), src.len())
480         };
481         Ok(())
482     }
483 
484     /// Returns a pointer to an element from the region with bounds checking. `offset` is in
485     /// units of `T`, not the number of bytes.
486     ///
487     /// Public but hidden since it should only be used from the [`dma_read`] and [`dma_write`] macros.
488     #[doc(hidden)]
489     pub fn item_from_index(&self, offset: usize) -> Result<*mut T> {
490         if offset >= self.count {
491             return Err(EINVAL);
492         }
493         // SAFETY:
494         // - The pointer is valid due to type invariant on `CoherentAllocation`
495         // and we've just checked that the range and index are within bounds.
496         // - `offset` can't overflow since it is smaller than `self.count` and we've checked
497         // that `self.count` won't overflow early in the constructor.
498         Ok(unsafe { self.cpu_addr.add(offset) })
499     }
500 
501     /// Reads the value of `field` and ensures that its type is [`FromBytes`].
502     ///
503     /// # Safety
504     ///
505     /// This must be called from the [`dma_read`] macro which ensures that the `field` pointer is
506     /// validated beforehand.
507     ///
508     /// Public but hidden since it should only be used from the [`dma_read`] macro.
509     #[doc(hidden)]
510     pub unsafe fn field_read<F: FromBytes>(&self, field: *const F) -> F {
511         // SAFETY:
512         // - By the safety requirements, `field` is valid.
513         // - Using read_volatile() here is not sound as per the usual rules; the usage here is
514         // a special exception with the following notes in place. When dealing with a potential
515         // race from hardware or code outside the kernel (e.g. a user-space program), we need
516         // reads of valid memory to not be UB. Currently read_volatile() is used for this, and
517         // the rationale behind it is that it should generate the same code as READ_ONCE(),
518         // which the kernel already relies on to avoid UB on data races. Note that the usage of
519         // read_volatile() is limited to this particular case; it cannot be used to prevent
520         // the UB caused by racing between two kernel functions, nor does it provide atomicity.
521         unsafe { field.read_volatile() }
522     }
523 
524     /// Writes a value to `field` and ensures that its type is [`AsBytes`].
525     ///
526     /// # Safety
527     ///
528     /// This must be called from the [`dma_write`] macro which ensures that the `field` pointer is
529     /// validated beforehand.
530     ///
531     /// Public but hidden since it should only be used from the [`dma_write`] macro.
532     #[doc(hidden)]
533     pub unsafe fn field_write<F: AsBytes>(&self, field: *mut F, val: F) {
534         // SAFETY:
535         // - By the safety requirements, `field` is valid.
536         // - Using write_volatile() here is not sound as per the usual rules; the usage here is
537         // a special exception with the following notes in place. When dealing with a potential
538         // race from hardware or code outside the kernel (e.g. a user-space program), we need
539         // writes to valid memory to not be UB. Currently write_volatile() is used for this, and
540         // the rationale behind it is that it should generate the same code as WRITE_ONCE(),
541         // which the kernel already relies on to avoid UB on data races. Note that the usage of
542         // write_volatile() is limited to this particular case; it cannot be used to prevent
543         // the UB caused by racing between two kernel functions, nor does it provide atomicity.
544         unsafe { field.write_volatile(val) }
545     }
546 }
547 
548 /// Note that the device configured to do DMA must be halted before this object is dropped.
549 impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
550     fn drop(&mut self) {
551         let size = self.count * core::mem::size_of::<T>();
552         // SAFETY: Device pointer is guaranteed as valid by the type invariant on `Device`.
553         // The cpu address, and the dma handle are valid due to the type invariants on
554         // `CoherentAllocation`.
555         unsafe {
556             bindings::dma_free_attrs(
557                 self.dev.as_raw(),
558                 size,
559                 self.cpu_addr.cast(),
560                 self.dma_handle,
561                 self.dma_attrs.as_raw(),
562             )
563         }
564     }
565 }
566 
567 // SAFETY: It is safe to send a `CoherentAllocation` to another thread if `T`
568 // can be sent to another thread.
569 unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}
570 
571 /// Reads a field of an item from an allocated region of structs.
572 ///
573 /// # Examples
574 ///
575 /// ```
576 /// use kernel::device::Device;
577 /// use kernel::dma::{attrs::*, CoherentAllocation};
578 ///
579 /// struct MyStruct { field: u32, }
580 ///
581 /// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
582 /// unsafe impl kernel::transmute::FromBytes for MyStruct{};
583 /// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
584 /// unsafe impl kernel::transmute::AsBytes for MyStruct{};
585 ///
586 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
587 /// let whole = kernel::dma_read!(alloc[2]);
588 /// let field = kernel::dma_read!(alloc[1].field);
589 /// # Ok::<(), Error>(()) }
590 /// ```
591 #[macro_export]
592 macro_rules! dma_read {
593     ($dma:expr, $idx: expr, $($field:tt)*) => {{
594         (|| -> ::core::result::Result<_, $crate::error::Error> {
595             let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
596             // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
597             // dereferenced. The compiler also further validates the expression on whether `field`
598             // is a member of `item` when expanded by the macro.
599             unsafe {
600                 let ptr_field = ::core::ptr::addr_of!((*item) $($field)*);
601                 ::core::result::Result::Ok(
602                     $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field)
603                 )
604             }
605         })()
606     }};
607     ($dma:ident [ $idx:expr ] $($field:tt)* ) => {
608         $crate::dma_read!($dma, $idx, $($field)*)
609     };
610     ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {
611         $crate::dma_read!($($dma).*, $idx, $($field)*)
612     };
613 }
614 
615 /// Writes to a field of an item from an allocated region of structs.
616 ///
617 /// # Examples
618 ///
619 /// ```
620 /// use kernel::device::Device;
621 /// use kernel::dma::{attrs::*, CoherentAllocation};
622 ///
623 /// struct MyStruct { member: u32, }
624 ///
625 /// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
626 /// unsafe impl kernel::transmute::FromBytes for MyStruct{};
627 /// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
628 /// unsafe impl kernel::transmute::AsBytes for MyStruct{};
629 ///
630 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
631 /// kernel::dma_write!(alloc[2].member = 0xf);
632 /// kernel::dma_write!(alloc[1] = MyStruct { member: 0xf });
633 /// # Ok::<(), Error>(()) }
634 /// ```
635 #[macro_export]
636 macro_rules! dma_write {
637     ($dma:ident [ $idx:expr ] $($field:tt)*) => {{
638         $crate::dma_write!($dma, $idx, $($field)*)
639     }};
640     ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{
641         $crate::dma_write!($($dma).*, $idx, $($field)*)
642     }};
643     ($dma:expr, $idx: expr, = $val:expr) => {
644         (|| -> ::core::result::Result<_, $crate::error::Error> {
645             let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
646             // SAFETY: `item_from_index` ensures that `item` is always a valid item.
647             unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) }
648             ::core::result::Result::Ok(())
649         })()
650     };
651     ($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => {
652         (|| -> ::core::result::Result<_, $crate::error::Error> {
653             let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
654             // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
655             // dereferenced. The compiler also further validates the expression on whether `field`
656             // is a member of `item` when expanded by the macro.
657             unsafe {
658                 let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*);
659                 $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val)
660             }
661             ::core::result::Result::Ok(())
662         })()
663     };
664 }
665