xref: /qemu/rust/qemu-api/src/memory.rs (revision c5f122fdcc280a82e7c5f31de890f985aa7ba773)
// Copyright 2024 Red Hat, Inc.
// Author(s): Paolo Bonzini <pbonzini@redhat.com>
// SPDX-License-Identifier: GPL-2.0-or-later

//! Bindings for `MemoryRegion`, `MemoryRegionOps` and `MemTxAttrs`

use std::{
    ffi::{c_uint, c_void, CStr, CString},
    marker::PhantomData,
};

pub use bindings::{hwaddr, MemTxAttrs};

use crate::{
    bindings::{self, device_endian, memory_region_init_io},
    callbacks::FnCall,
    cell::Opaque,
    prelude::*,
    zeroable::Zeroable,
};

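/// The operations (read/write callbacks, endianness and access-size
/// constraints) for an I/O [`MemoryRegion`], tied to an owner type `T`.
/// The callbacks receive a `&T` recovered from the opaque pointer that
/// [`MemoryRegion::init_io`] registers; values of this type are normally
/// produced with [`MemoryRegionOpsBuilder`].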
pub struct MemoryRegionOps<T>(
    bindings::MemoryRegionOps,
    // Note: quite often you'll see PhantomData<fn(&T)> mentioned when discussing
    // covariance and contravariance; you don't need any of those to understand
    // this usage of PhantomData.  Quite simply, MemoryRegionOps<T> *logically*
    // holds callbacks that take an argument of type &T, except the type is erased
    // before the callback is stored in the bindings::MemoryRegionOps field.
    // The argument of PhantomData is a function pointer in order to represent
    // that relationship; while that will also provide desirable and safe variance
    // for T, variance is not the point but just a consequence.
    PhantomData<fn(&T)>,
);

// SAFETY: When a *const T is passed to the callbacks, the call itself
// is done in a thread-safe manner.  The invocation is okay as long as
// T itself is `Sync`.
unsafe impl<T: Sync> Sync for MemoryRegionOps<T> {}

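/// A `const` builder for [`MemoryRegionOps`]: start from [`new`](Self::new),
/// chain the configuration methods, and finish with [`build`](Self::build).
/// Because every method is `const`, the result can be stored in a `static`.
///
/// A rough usage sketch; `MyDevice` and its `read`/`write` methods are
/// placeholders for a real owner type, not something defined in this module:
///
/// ```ignore
/// static MY_DEVICE_OPS: MemoryRegionOps<MyDevice> =
///     MemoryRegionOpsBuilder::<MyDevice>::new()
///         .read(&MyDevice::read)
///         .write(&MyDevice::write)
///         .native_endian()
///         .valid_sizes(1, 8)
///         .build();
/// ```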
#[derive(Clone)]
pub struct MemoryRegionOpsBuilder<T>(bindings::MemoryRegionOps, PhantomData<fn(&T)>);

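// Type-erased trampolines that are stored into `bindings::MemoryRegionOps`.
// The `opaque` argument is the owner pointer that `do_init_io` registers with
// `memory_region_init_io`, so casting it back to `*const T` recovers the
// owner that the guest access is directed at.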
unsafe extern "C" fn memory_region_ops_read_cb<T, F: for<'a> FnCall<(&'a T, hwaddr, u32), u64>>(
    opaque: *mut c_void,
    addr: hwaddr,
    size: c_uint,
) -> u64 {
    F::call((unsafe { &*(opaque.cast::<T>()) }, addr, size))
}

unsafe extern "C" fn memory_region_ops_write_cb<T, F: for<'a> FnCall<(&'a T, hwaddr, u64, u32)>>(
    opaque: *mut c_void,
    addr: hwaddr,
    data: u64,
    size: c_uint,
) {
    F::call((unsafe { &*(opaque.cast::<T>()) }, addr, data, size))
}

impl<T> MemoryRegionOpsBuilder<T> {
    #[must_use]
    pub const fn read<F: for<'a> FnCall<(&'a T, hwaddr, u32), u64>>(mut self, _f: &F) -> Self {
        self.0.read = Some(memory_region_ops_read_cb::<T, F>);
        self
    }

    #[must_use]
    pub const fn write<F: for<'a> FnCall<(&'a T, hwaddr, u64, u32)>>(mut self, _f: &F) -> Self {
        self.0.write = Some(memory_region_ops_write_cb::<T, F>);
        self
    }

    #[must_use]
    pub const fn big_endian(mut self) -> Self {
        self.0.endianness = device_endian::DEVICE_BIG_ENDIAN;
        self
    }

    #[must_use]
    pub const fn little_endian(mut self) -> Self {
        self.0.endianness = device_endian::DEVICE_LITTLE_ENDIAN;
        self
    }

    #[must_use]
    pub const fn native_endian(mut self) -> Self {
        self.0.endianness = device_endian::DEVICE_NATIVE_ENDIAN;
        self
    }

    #[must_use]
    pub const fn valid_sizes(mut self, min: u32, max: u32) -> Self {
        self.0.valid.min_access_size = min;
        self.0.valid.max_access_size = max;
        self
    }

    #[must_use]
    pub const fn valid_unaligned(mut self) -> Self {
        self.0.valid.unaligned = true;
        self
    }

    #[must_use]
    pub const fn impl_sizes(mut self, min: u32, max: u32) -> Self {
        self.0.impl_.min_access_size = min;
        self.0.impl_.max_access_size = max;
        self
    }

    #[must_use]
    pub const fn impl_unaligned(mut self) -> Self {
        self.0.impl_.unaligned = true;
        self
    }

    #[must_use]
    pub const fn build(self) -> MemoryRegionOps<T> {
        MemoryRegionOps::<T>(self.0, PhantomData)
    }

    #[must_use]
    pub const fn new() -> Self {
        Self(bindings::MemoryRegionOps::ZERO, PhantomData)
    }
}

impl<T> Default for MemoryRegionOpsBuilder<T> {
    fn default() -> Self {
        Self::new()
    }
}

/// A safe wrapper around [`bindings::MemoryRegion`].
#[repr(transparent)]
#[derive(qemu_api_macros::Wrapper)]
pub struct MemoryRegion(Opaque<bindings::MemoryRegion>);

unsafe impl Send for MemoryRegion {}
unsafe impl Sync for MemoryRegion {}

impl MemoryRegion {
    // inline to ensure that it is not included in tests, which only
    // link to hwcore and qom.  FIXME: inlining is actually the opposite
    // of what we want, since this is the type-erased version of the
    // init_io function below.  Look into splitting the qemu_api crate.
    #[inline(always)]
    unsafe fn do_init_io(
        slot: *mut bindings::MemoryRegion,
        owner: *mut Object,
        ops: &'static bindings::MemoryRegionOps,
        name: &'static str,
        size: u64,
    ) {
        unsafe {
            let cstr = CString::new(name).unwrap();
            memory_region_init_io(
                slot,
                owner.cast::<bindings::Object>(),
                ops,
                owner.cast::<c_void>(),
                cstr.as_ptr(),
                size,
            );
        }
    }

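    /// Initialize `self` as an I/O memory region of `size` bytes, dispatching
    /// guest accesses to the callbacks in `ops`.  `owner` becomes both the
    /// owner object of the region and the opaque pointer handed back to the
    /// callbacks.
    ///
    /// A rough usage sketch; `dev: *mut MyDevice`, its `iomem` field and the
    /// `MY_DEVICE_OPS` static are placeholders, not part of this module:
    ///
    /// ```ignore
    /// unsafe { &mut (*dev).iomem }
    ///     .init_io(dev, &MY_DEVICE_OPS, "mydevice-mmio", 0x1000);
    /// ```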
    pub fn init_io<T: IsA<Object>>(
        &mut self,
        owner: *mut T,
        ops: &'static MemoryRegionOps<T>,
        name: &'static str,
        size: u64,
    ) {
        unsafe {
            Self::do_init_io(
                self.0.as_mut_ptr(),
                owner.cast::<Object>(),
                &ops.0,
                name,
                size,
            );
        }
    }
}

unsafe impl ObjectType for MemoryRegion {
    type Class = bindings::MemoryRegionClass;
    const TYPE_NAME: &'static CStr =
        unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_MEMORY_REGION) };
}
qom_isa!(MemoryRegion: Object);

/// A special `MemTxAttrs` constant, used to indicate that no memory
/// attributes are specified.
///
/// Bus masters which don't specify any attributes will get this,
/// which has all attribute bits clear except the topmost one
/// (so that we can distinguish "all attributes deliberately clear"
/// from "didn't specify" if necessary).
pub const MEMTXATTRS_UNSPECIFIED: MemTxAttrs = MemTxAttrs {
    unspecified: true,
    ..Zeroable::ZERO
};